Columns: repo (string, lengths 7 to 54) | path (string, lengths 4 to 192) | url (string, lengths 87 to 284) | code (string, lengths 78 to 104k) | code_tokens (list) | docstring (string, lengths 1 to 46.9k) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes)
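For orientation, the sketch below shows how a single row of this table can be represented as a plain Python dict. It is illustrative only: the field values are copied from the first record below, the token lists are truncated, and no particular dataset library is assumed.

```python
# Illustrative sketch only (not a loader API): one row of this dump held as a
# plain Python dict. Values are copied from the first record below; the two
# token lists are shortened here, but in the data they mirror the code and
# docstring fields split token by token.
from collections import Counter

record = {
    "repo": "obulpathi/cdn-fastly-python",
    "path": "fastly/__init__.py",
    "url": ("https://github.com/obulpathi/cdn-fastly-python/blob/"
            "db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L499-L502"),
    "code": "def list_healthchecks(self, service_id, version_number): ...",
    "code_tokens": ["def", "list_healthchecks", "(", "self", ",", "service_id"],
    "docstring": "List all of the healthchecks for a particular service and version.",
    "docstring_tokens": ["List", "all", "of", "the", "healthchecks"],
    "language": "python",
    "partition": "train",
}

# With all rows collected in a list, the partition column gives the split sizes.
rows = [record]
print(Counter(r["partition"] for r in rows))
```

The records below all follow this same shape.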
repo: obulpathi/cdn-fastly-python
path: fastly/__init__.py
url: https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L499-L502
code:
def list_healthchecks(self, service_id, version_number):
"""List all of the healthchecks for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number))
return map(lambda x: FastlyHealthCheck(self, x), content)
docstring:
List all of the healthchecks for a particular service and version.
language: python
partition: train
repo: klahnakoski/pyLibrary
path: pyLibrary/sql/mysql.py
url: https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/pyLibrary/sql/mysql.py#L86-L116
code:
def _open(self):
""" DO NOT USE THIS UNLESS YOU close() FIRST"""
try:
self.db = connect(
host=self.settings.host,
port=self.settings.port,
user=coalesce(self.settings.username, self.settings.user),
passwd=coalesce(self.settings.password, self.settings.passwd),
db=coalesce(self.settings.schema, self.settings.db),
read_timeout=coalesce(self.settings.read_timeout, (EXECUTE_TIMEOUT / 1000) - 10 if EXECUTE_TIMEOUT else None, 5*60),
charset=u"utf8",
use_unicode=True,
ssl=coalesce(self.settings.ssl, None),
cursorclass=cursors.SSCursor
)
except Exception as e:
if self.settings.host.find("://") == -1:
Log.error(
u"Failure to connect to {{host}}:{{port}}",
host=self.settings.host,
port=self.settings.port,
cause=e
)
else:
Log.error(u"Failure to connect. PROTOCOL PREFIX IS PROBABLY BAD", e)
self.cursor = None
self.partial_rollback = False
self.transaction_level = 0
self.backlog = [] # accumulate the write commands so they are sent at once
if self.readonly:
self.begin()
docstring:
DO NOT USE THIS UNLESS YOU close() FIRST
language: python
partition: train
repo: Azure/azure-cli-extensions
path: src/sqlvm-preview/azext_sqlvm_preview/_format.py
url: https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/sqlvm-preview/azext_sqlvm_preview/_format.py#L150-L178
code:
def format_auto_backup_settings(result):
'''
Formats the AutoBackupSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.enable is not None:
order_dict['enable'] = result.enable
if result.enable_encryption is not None:
order_dict['enableEncryption'] = result.enable_encryption
if result.retention_period is not None:
order_dict['retentionPeriod'] = result.retention_period
if result.storage_account_url is not None:
order_dict['storageAccountUrl'] = result.storage_account_url
if result.backup_system_dbs is not None:
order_dict['backupSystemDbs'] = result.backup_system_dbs
if result.backup_schedule_type is not None:
order_dict['backupScheduleType'] = result.backup_schedule_type
if result.full_backup_frequency is not None:
order_dict['fullBackupFrequency'] = result.full_backup_frequency
if result.full_backup_start_time is not None:
order_dict['fullBackupStartTime'] = result.full_backup_start_time
if result.full_backup_window_hours is not None:
order_dict['fullBackupWindowHours'] = result.full_backup_window_hours
if result.log_backup_frequency is not None:
order_dict['logBackupFrequency'] = result.log_backup_frequency
return order_dict
docstring:
Formats the AutoBackupSettings object removing arguments that are empty
language: python
partition: train
repo: yero13/na3x
path: na3x/cfg.py
url: https://github.com/yero13/na3x/blob/b31ef801ea574081125020a7d0f9c4242f8f8b02/na3x/cfg.py#L12-L23
code:
def init(cfg):
"""
Initialiaze na3x
:param cfg: db, triggers, environment variables configuration
"""
global na3x_cfg
with open(cfg[NA3X_DB]) as db_cfg_file:
na3x_cfg[NA3X_DB] = json.load(db_cfg_file, strict=False)
with open(cfg[NA3X_TRIGGERS]) as triggers_cfg_file:
na3x_cfg[NA3X_TRIGGERS] = json.load(triggers_cfg_file, strict=False)
with open(cfg[NA3X_ENV]) as env_cfg_file:
na3x_cfg[NA3X_ENV] = json.load(env_cfg_file, strict=False)
docstring:
Initialiaze na3x
:param cfg: db, triggers, environment variables configuration
language: python
partition: train
repo: nickmckay/LiPD-utilities
path: Python/lipd/directory.py
url: https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/directory.py#L288-L305
code:
def get_filenames_in_lipd(path, name=""):
"""
List all the files contained in the LiPD archive. Bagit, JSON, and CSV
:param str path: Directory to be listed
:param str name: LiPD dataset name, if you want to prefix it to show file hierarchy
:return list: Filenames found
"""
_filenames = []
try:
# in the top level, list all files and skip the "data" directory
_top = [os.path.join(name, f) for f in os.listdir(path) if f != "data"]
# in the data directory, list all files
_dir_data = [os.path.join(name, "data", f) for f in os.listdir(os.path.join(path, "data"))]
# combine the two lists
_filenames = _top + _dir_data
except Exception:
pass
return _filenames
docstring:
List all the files contained in the LiPD archive. Bagit, JSON, and CSV
:param str path: Directory to be listed
:param str name: LiPD dataset name, if you want to prefix it to show file hierarchy
:return list: Filenames found
language: python
partition: train
repo: allenai/allennlp
path: allennlp/semparse/domain_languages/wikitables_language.py
url: https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L323-L342
code:
def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool:
"""
Takes a logical form, and the list of target values as strings from the original lisp
string, and returns True iff the logical form executes to the target list, using the
official WikiTableQuestions evaluation script.
"""
normalized_target_list = [TableQuestionContext.normalize_string(value) for value in
target_list]
target_value_list = evaluator.to_value_list(normalized_target_list)
try:
denotation = self.execute(logical_form)
except ExecutionError:
logger.warning(f'Failed to execute: {logical_form}')
return False
if isinstance(denotation, list):
denotation_list = [str(denotation_item) for denotation_item in denotation]
else:
denotation_list = [str(denotation)]
denotation_value_list = evaluator.to_value_list(denotation_list)
return evaluator.check_denotation(target_value_list, denotation_value_list)
docstring:
Takes a logical form, and the list of target values as strings from the original lisp
string, and returns True iff the logical form executes to the target list, using the
official WikiTableQuestions evaluation script.
language: python
partition: train
repo: csurfer/pyheat
path: pyheat/commandline.py
url: https://github.com/csurfer/pyheat/blob/cc0ee3721aea70a1da4918957500131aa7077afe/pyheat/commandline.py#L31-L50
code:
def main():
"""Starting point for the program execution."""
# Create command line parser.
parser = argparse.ArgumentParser()
# Adding command line arguments.
parser.add_argument("-o", "--out", help="Output file", default=None)
parser.add_argument(
"pyfile", help="Python file to be profiled", default=None
)
# Parse command line arguments.
arguments = parser.parse_args()
if arguments.pyfile is not None:
# Core functionality.
pyheat = PyHeat(arguments.pyfile)
pyheat.create_heatmap()
pyheat.show_heatmap(output_file=arguments.out, enable_scroll=True)
pyheat.close_heatmap()
else:
# Print command help
parser.print_help()
docstring:
Starting point for the program execution.
language: python
partition: train
repo: AguaClara/aguaclara
path: aguaclara/research/procoda_parser.py
url: https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/research/procoda_parser.py#L105-L128
code:
def data_from_dates(path, dates):
"""Return list DataFrames representing the ProCoDA datalogs stored in
the given path and recorded on the given dates.
:param path: The path to the folder containing the ProCoDA data file(s)
:type path: string
:param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
:type dates: string or string list
:return: a list DataFrame objects representing the ProCoDA datalogs corresponding with the given dates
:rtype: pandas.DataFrame list
"""
if path[-1] != os.path.sep:
path += os.path.sep
if not isinstance(dates, list):
dates = [dates]
data = []
for d in dates:
filepath = path + 'datalog ' + d + '.xls'
data.append(remove_notes(pd.read_csv(filepath, delimiter='\t')))
return data
docstring:
Return list DataFrames representing the ProCoDA datalogs stored in
the given path and recorded on the given dates.
:param path: The path to the folder containing the ProCoDA data file(s)
:type path: string
:param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
:type dates: string or string list
:return: a list DataFrame objects representing the ProCoDA datalogs corresponding with the given dates
:rtype: pandas.DataFrame list
language: python
partition: train
repo: cloud9ers/gurumate
path: environment/lib/python2.7/site-packages/IPython/utils/_process_win32_controller.py
url: https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/_process_win32_controller.py#L475-L490
code:
def _stdin_raw_block(self):
"""Use a blocking stdin read"""
# The big problem with the blocking read is that it doesn't
# exit when it's supposed to in all contexts. An extra
# key-press may be required to trigger the exit.
try:
data = sys.stdin.read(1)
data = data.replace('\r', '\n')
return data
except WindowsError as we:
if we.winerror == ERROR_NO_DATA:
# This error occurs when the pipe is closed
return None
else:
# Otherwise let the error propagate
raise we
docstring:
Use a blocking stdin read
language: python
partition: test
repo: fermiPy/fermipy
path: fermipy/scripts/cluster_sources.py
url: https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/cluster_sources.py#L420-L443
code:
def make_reverse_dict(in_dict, warn=True):
""" Build a reverse dictionary from a cluster dictionary
Parameters
----------
in_dict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and
the list of other source in the cluster.
Returns
-------
out_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster. Note that the key
does not point to itself.
"""
out_dict = {}
for k, v in in_dict.items():
for vv in v:
if vv in out_dict:
if warn:
print("Dictionary collision %i" % vv)
out_dict[vv] = k
return out_dict
docstring:
Build a reverse dictionary from a cluster dictionary
Parameters
----------
in_dict : dict(int:[int,])
A dictionary of clusters. Each cluster is a source index and
the list of other source in the cluster.
Returns
-------
out_dict : dict(int:int)
A single valued dictionary pointing from source index to
cluster key for each source in a cluster. Note that the key
does not point to itself.
language: python
partition: train
repo: matgrioni/betacode
path: betacode/conv.py
url: https://github.com/matgrioni/betacode/blob/2f8b439c0de9cdf451b0b390161752cac9879137/betacode/conv.py#L149-L176
code:
def uni_to_beta(text):
"""
Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable.
"""
u = _UNICODE_MAP
transform = []
for ch in text:
try:
conv = u[ch]
except KeyError:
conv = ch
transform.append(conv)
converted = ''.join(transform)
return converted
docstring:
Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable.
language: python
partition: train
repo: glottobank/python-newick
path: src/newick.py
url: https://github.com/glottobank/python-newick/blob/e8d4d1e4610f271d0f0e5cb86c0e0360b43bd702/src/newick.py#L374-L383
code:
def dumps(trees):
"""
Serialize a list of trees in Newick format.
:param trees: List of Node objects or a single Node object.
:return: Newick formatted string.
"""
if isinstance(trees, Node):
trees = [trees]
return ';\n'.join([tree.newick for tree in trees]) + ';'
docstring:
Serialize a list of trees in Newick format.
:param trees: List of Node objects or a single Node object.
:return: Newick formatted string.
language: python
partition: test
repo: sashahart/cookies
path: cookies.py
url: https://github.com/sashahart/cookies/blob/ab8185e06f221eaf65305f15e05852393723ac95/cookies.py#L430-L440
code:
def valid_name(name):
"Validate a cookie name string"
if isinstance(name, bytes):
name = name.decode('ascii')
if not Definitions.COOKIE_NAME_RE.match(name):
return False
# This module doesn't support $identifiers, which are part of an obsolete
# and highly complex standard which is never used.
if name[0] == "$":
return False
return True
docstring:
Validate a cookie name string
language: python
partition: train
repo: jkitzes/macroeco
path: doc/_ext/juliadoc/juliadoc/__init__.py
url: https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/doc/_ext/juliadoc/juliadoc/__init__.py#L3-L10
code:
def get_theme_dir():
"""
Returns path to directory containing this package's theme.
This is designed to be used when setting the ``html_theme_path``
option within Sphinx's ``conf.py`` file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), "theme"))
docstring:
Returns path to directory containing this package's theme.
This is designed to be used when setting the ``html_theme_path``
option within Sphinx's ``conf.py`` file.
language: python
partition: train
repo: onicagroup/runway
path: runway/hooks/staticsite/build_staticsite.py
url: https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/hooks/staticsite/build_staticsite.py#L57-L77
code:
def zip_and_upload(app_dir, bucket, key, session=None):
"""Zip built static site and upload to S3."""
if session:
s3_client = session.client('s3')
else:
s3_client = boto3.client('s3')
transfer = S3Transfer(s3_client)
filedes, temp_file = tempfile.mkstemp()
os.close(filedes)
LOGGER.info("staticsite: archiving app at %s to s3://%s/%s",
app_dir, bucket, key)
with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as filehandle:
with change_dir(app_dir):
for dirname, _subdirs, files in os.walk('./'):
if dirname != './':
filehandle.write(dirname)
for filename in files:
filehandle.write(os.path.join(dirname, filename))
transfer.upload_file(temp_file, bucket, key)
os.remove(temp_file)
docstring:
Zip built static site and upload to S3.
language: python
partition: train
repo: chaimleib/intervaltree
path: intervaltree/intervaltree.py
url: https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L589-L598
code:
def overlaps_point(self, p):
"""
Returns whether some interval in the tree overlaps p.
Completes in O(log n) time.
:rtype: bool
"""
if self.is_empty():
return False
return bool(self.top_node.contains_point(p))
docstring:
Returns whether some interval in the tree overlaps p.
Completes in O(log n) time.
:rtype: bool
language: python
partition: train
repo: PyCQA/pylint
path: pylint/lint.py
url: https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/lint.py#L1219-L1226
code:
def open(self):
"""initialize counters"""
self.stats = {"by_module": {}, "by_msg": {}}
MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
MANAGER.max_inferable_values = self.config.limit_inference_results
MANAGER.extension_package_whitelist.update(self.config.extension_pkg_whitelist)
for msg_cat in MSG_TYPES.values():
self.stats[msg_cat] = 0
docstring:
initialize counters
language: python
partition: test
repo: mitsei/dlkit
path: dlkit/json_/assessment/managers.py
url: https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L1139-L1154
code:
def get_assessment_admin_session(self):
"""Gets the ``OsidSession`` associated with the assessment administration service.
return: (osid.assessment.AssessmentAdminSession) - an
``AssessmentAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_admin()`` is ``true``.*
"""
if not self.supports_assessment_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssessmentAdminSession(runtime=self._runtime)
docstring:
Gets the ``OsidSession`` associated with the assessment administration service.
return: (osid.assessment.AssessmentAdminSession) - an
``AssessmentAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_admin()`` is ``true``.*
language: python
partition: train
repo: splunk/splunk-sdk-python
path: examples/analytics/bottle.py
url: https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L493-L497
code:
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
docstring:
Close the application and all installed plugins.
language: python
partition: train
repo: mitsei/dlkit
path: dlkit/json_/assessment_authoring/sessions.py
url: https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L2107-L2140
code:
def get_sequence_rules_by_ids(self, sequence_rule_ids):
"""Gets a ``SequenceRuleList`` corresponding to the given ``IdList``.
arg: sequence_rule_ids (osid.id.IdList): the list of ``Ids``
to retrieve
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NotFound - a ``Id was`` not found
raise: NullArgument - ``sequence_rule_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_ids
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
object_id_list = []
for i in sequence_rule_ids:
object_id_list.append(ObjectId(self._get_id(i, 'assessment_authoring').get_identifier()))
result = collection.find(
dict({'_id': {'$in': object_id_list}},
**self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break
return objects.SequenceRuleList(sorted_result, runtime=self._runtime, proxy=self._proxy)
docstring:
Gets a ``SequenceRuleList`` corresponding to the given ``IdList``.
arg: sequence_rule_ids (osid.id.IdList): the list of ``Ids``
to retrieve
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NotFound - a ``Id was`` not found
raise: NullArgument - ``sequence_rule_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
language: python
partition: train
repo: joestump/python-oauth2
path: example/appengine_oauth.py
url: https://github.com/joestump/python-oauth2/blob/b94f69b1ad195513547924e380d9265133e995fa/example/appengine_oauth.py#L82-L92
code:
def is_valid(self):
"""Returns a Client object if this is a valid OAuth request."""
try:
request = self.get_oauth_request()
client = self.get_client(request)
params = self._server.verify_request(request, client, None)
except Exception as e:
raise e
return client
docstring:
Returns a Client object if this is a valid OAuth request.
language: python
partition: train
repo: ldomic/lintools
path: lintools/data.py
url: https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/data.py#L97-L105
code:
def renumber_system(self, offset):
"""
The residue numbers of the protein can be reformated in case of misallignment with the convention.
Takes:
* offset * - a number that represents by how many residues the numbering has to be shifted.
"""
self.universe.protein = self.universe.select_atoms("protein")
self.universe.protein.residues.resids = self.universe.protein.residues.resids+int(offset)
docstring:
The residue numbers of the protein can be reformated in case of misallignment with the convention.
Takes:
* offset * - a number that represents by how many residues the numbering has to be shifted.
language: python
partition: train
repo: tensorflow/tensor2tensor
path: tensor2tensor/models/revnet.py
url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L345-L378
code:
def revnet_base():
"""Default hparams for Revnet."""
hparams = common_hparams.basic_params1()
hparams.add_hparam('num_channels', [64, 128, 256, 416])
hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1])
hparams.add_hparam('bottleneck', True)
hparams.add_hparam('first_batch_norm', [False, True, True, True])
hparams.add_hparam('init_stride', 2)
hparams.add_hparam('init_kernel_size', 7)
hparams.add_hparam('init_maxpool', True)
hparams.add_hparam('strides', [1, 2, 2, 2])
hparams.add_hparam('num_channels_init_block', 64)
hparams.add_hparam('dim', '2d')
# Variable init
hparams.initializer = 'normal_unit_scaling'
hparams.initializer_gain = 2.
# Optimization
hparams.optimizer = 'Momentum'
hparams.optimizer_momentum_momentum = 0.9
hparams.optimizer_momentum_nesterov = True
hparams.weight_decay = 1e-4
hparams.clip_grad_norm = 0.0
# (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)
hparams.learning_rate = 0.4
hparams.learning_rate_decay_scheme = 'cosine'
# For image_imagenet224, 120k training steps, which effectively makes this a
# cosine decay (i.e. no cycles).
hparams.learning_rate_cosine_cycle_steps = 120000
# Can run with a batch size of 128 with Problem ImageImagenet224
hparams.batch_size = 128
return hparams
docstring:
Default hparams for Revnet.
language: python
partition: train
repo: sassoftware/sas_kernel
path: sas_kernel/kernel.py
url: https://github.com/sassoftware/sas_kernel/blob/ed63dceb9d1d51157b465f4892ffb793c1c32307/sas_kernel/kernel.py#L131-L179
code:
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]:
"""
This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list
"""
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
if self.mva is None:
self._allow_stdin = True
self._start_sas()
if self.lst_len < 0:
self._get_lst_len()
if code.startswith('Obfuscated SAS Code'):
logger.debug("decoding string")
tmp1 = code.split()
decode = base64.b64decode(tmp1[-1])
code = decode.decode('utf-8')
if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False:
logger.debug("code type: " + str(type(code)))
logger.debug("code length: " + str(len(code)))
logger.debug("code string: " + code)
if code.startswith("/*SASKernelTest*/"):
res = self.mva.submit(code, "text")
else:
res = self.mva.submit(code, prompt=self.promptDict)
self.promptDict = {}
if res['LOG'].find("SAS process has terminated unexpectedly") > -1:
print(res['LOG'], '\n' "Restarting SAS session on your behalf")
self.do_shutdown(True)
return res['LOG']
output = res['LST']
log = res['LOG']
return self._which_display(log, output)
elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False:
full_log = highlight(self.mva.saslog(), SASLogLexer(),
HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>",
title="Full SAS Log"))
return full_log.replace('\n', ' ')
else:
return self.cachedlog.replace('\n', ' ')
docstring:
This is the main method that takes code from the Jupyter cell and submits it to the SAS server
:param code: code from the cell
:param silent:
:return: str with either the log or list
language: python
partition: train
repo: jwkvam/bowtie
path: bowtie/_app.py
url: https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L836-L849
code:
def schedule(self, seconds: float):
"""Call a function periodically.
Parameters
----------
seconds : float
Minimum interval of function calls.
func : callable
Function to be called.
"""
def wrap(func: Callable):
self._schedules.append(Scheduler(self.app, seconds, func))
return wrap
docstring:
Call a function periodically.
Parameters
----------
seconds : float
Minimum interval of function calls.
func : callable
Function to be called.
language: python
partition: train
repo: ekmmetering/ekmmeters
path: ekmmeters.py
url: https://github.com/ekmmetering/ekmmeters/blob/b3748bdf30263bfa46ea40157bdf8df2522e1904/ekmmeters.py#L1803-L1822
code:
def jsonRender(self, def_buf):
""" Translate the passed serial block into string only JSON.
Args:
def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.
Returns:
str: JSON rendering of meter record.
"""
try:
ret_dict = SerialBlock()
ret_dict[Field.Meter_Address] = self.getMeterAddress()
for fld in def_buf:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]
except:
ekm_log(traceback.format_exc(sys.exc_info()))
return ""
return json.dumps(ret_dict, indent=4)
docstring:
Translate the passed serial block into string only JSON.
Args:
def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.
Returns:
str: JSON rendering of meter record.
|
[
"Translate",
"the",
"passed",
"serial",
"block",
"into",
"string",
"only",
"JSON",
"."
] |
python
|
test
|
repo: log2timeline/dfvfs
path: dfvfs/lib/cpio.py
url: https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/cpio.py#L231-L250
code:
def _ReadFileEntries(self, file_object):
"""Reads the file entries from the cpio archive.
Args:
file_object (FileIO): file-like object.
"""
self._file_entries = {}
file_offset = 0
while file_offset < self._file_size or self._file_size == 0:
file_entry = self._ReadFileEntry(file_object, file_offset)
file_offset += file_entry.size
if file_entry.path == 'TRAILER!!!':
break
if file_entry.path in self._file_entries:
# TODO: alert on file entries with duplicate paths?
continue
self._file_entries[file_entry.path] = file_entry
docstring:
Reads the file entries from the cpio archive.
Args:
file_object (FileIO): file-like object.
|
[
"Reads",
"the",
"file",
"entries",
"from",
"the",
"cpio",
"archive",
"."
] |
python
|
train
|
repo: QUANTAXIS/QUANTAXIS
path: QUANTAXIS/QAFetch/QATdx.py
url: https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L1649-L1730
code:
def QA_fetch_get_option_50etf_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
fix here :
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
result['meaningful_name'] = None
C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] # 10001215
strName = result.loc[idx, 'name'] # 510050C9M03200
strDesc = result.loc[idx, 'desc'] # 10001215
if strName.startswith("510050"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
if strName.startswith("510050C"):
putcall = '50ETF,认购期权'
elif strName.startswith("510050P"):
putcall = '50ETF,认沽期权'
else:
putcall = "Unkown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
# 第12位期初设为“M”,并根据合约调整次数按照“A”至“Z”依序变更,如变更为“A”表示期权合约发生首次调整,变更为“B”表示期权合约发生第二次调整,依此类推;
# fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
adjust = " 第2调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
adjust = " 第10次以上的调整,调整代码 %s" + strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
return rows
docstring:
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
|
[
"#🛠todo",
"获取期权合约的上市日期",
"?",
"暂时没有。",
":",
"return",
":",
"list",
"Series"
] |
python
|
train
|
python-rope/rope
|
rope/base/libutils.py
|
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/libutils.py#L11-L32
|
def path_to_resource(project, path, type=None):
"""Get the resource at path
You only need to specify `type` if `path` does not exist. It can
be either 'file' or 'folder'. If the type is `None` it is assumed
that the resource already exists.
Note that this function uses `Project.get_resource()`,
`Project.get_file()`, and `Project.get_folder()` methods.
"""
project_path = path_relative_to_project_root(project, path)
if project_path is None:
project_path = rope.base.project._realpath(path)
project = rope.base.project.get_no_project()
if type is None:
return project.get_resource(project_path)
if type == 'file':
return project.get_file(project_path)
if type == 'folder':
return project.get_folder(project_path)
return None
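# Hedged usage sketch (the project root and file names are hypothetical, not taken
# from the rope sources): resolving an existing file and a not-yet-created one.
if __name__ == "__main__":
    import rope.base.project
    project = rope.base.project.Project("/tmp/example_project")  # hypothetical root
    existing = path_to_resource(project, "/tmp/example_project/setup.py")
    planned = path_to_resource(project, "/tmp/example_project/new_module.py", type="file")
    print(existing, planned)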
|
[
"def",
"path_to_resource",
"(",
"project",
",",
"path",
",",
"type",
"=",
"None",
")",
":",
"project_path",
"=",
"path_relative_to_project_root",
"(",
"project",
",",
"path",
")",
"if",
"project_path",
"is",
"None",
":",
"project_path",
"=",
"rope",
".",
"base",
".",
"project",
".",
"_realpath",
"(",
"path",
")",
"project",
"=",
"rope",
".",
"base",
".",
"project",
".",
"get_no_project",
"(",
")",
"if",
"type",
"is",
"None",
":",
"return",
"project",
".",
"get_resource",
"(",
"project_path",
")",
"if",
"type",
"==",
"'file'",
":",
"return",
"project",
".",
"get_file",
"(",
"project_path",
")",
"if",
"type",
"==",
"'folder'",
":",
"return",
"project",
".",
"get_folder",
"(",
"project_path",
")",
"return",
"None"
] |
Get the resource at path
You only need to specify `type` if `path` does not exist. It can
be either 'file' or 'folder'. If the type is `None` it is assumed
that the resource already exists.
Note that this function uses `Project.get_resource()`,
`Project.get_file()`, and `Project.get_folder()` methods.
|
[
"Get",
"the",
"resource",
"at",
"path"
] |
python
|
train
|
marcharper/python-ternary
|
ternary/plotting.py
|
https://github.com/marcharper/python-ternary/blob/a4bef393ec9df130d4b55707293c750498a01843/ternary/plotting.py#L53-L72
|
def plot(points, ax=None, permutation=None, **kwargs):
"""
Analogous to maplotlib.plot. Plots trajectory points where each point is a
tuple (x,y,z) satisfying x + y + z = scale (not checked). The tuples are
projected and plotted as a curve.
Parameters
----------
points: List of 3-tuples
The list of tuples to be plotted as a connected curve.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not ax:
fig, ax = pyplot.subplots()
xs, ys = project_sequence(points, permutation=permutation)
ax.plot(xs, ys, **kwargs)
return ax
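# Illustrative call, not part of the library sources: plotting a short trajectory
# whose points sum to a scale of 1.0; extra keyword arguments are forwarded to
# matplotlib. The output file name is a placeholder.
if __name__ == "__main__":
    trajectory = [(0.1, 0.2, 0.7), (0.2, 0.3, 0.5), (0.4, 0.4, 0.2)]
    ax = plot(trajectory, color="steelblue", linewidth=2.0)
    ax.figure.savefig("trajectory.png")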
|
[
"def",
"plot",
"(",
"points",
",",
"ax",
"=",
"None",
",",
"permutation",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"ax",
":",
"fig",
",",
"ax",
"=",
"pyplot",
".",
"subplots",
"(",
")",
"xs",
",",
"ys",
"=",
"project_sequence",
"(",
"points",
",",
"permutation",
"=",
"permutation",
")",
"ax",
".",
"plot",
"(",
"xs",
",",
"ys",
",",
"*",
"*",
"kwargs",
")",
"return",
"ax"
] |
Analogous to maplotlib.plot. Plots trajectory points where each point is a
tuple (x,y,z) satisfying x + y + z = scale (not checked). The tuples are
projected and plotted as a curve.
Parameters
----------
points: List of 3-tuples
The list of tuples to be plotted as a connected curve.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
kwargs:
Any kwargs to pass through to matplotlib.
|
[
"Analogous",
"to",
"maplotlib",
".",
"plot",
".",
"Plots",
"trajectory",
"points",
"where",
"each",
"point",
"is",
"a",
"tuple",
"(",
"x",
"y",
"z",
")",
"satisfying",
"x",
"+",
"y",
"+",
"z",
"=",
"scale",
"(",
"not",
"checked",
")",
".",
"The",
"tuples",
"are",
"projected",
"and",
"plotted",
"as",
"a",
"curve",
"."
] |
python
|
train
|
pantsbuild/pants
|
src/python/pants/releases/reversion.py
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/releases/reversion.py#L21-L38
|
def replace_in_file(workspace, src_file_path, from_str, to_str):
"""Replace from_str with to_str in the name and content of the given file.
If any edits were necessary, returns the new filename (which may be the same as the old filename).
"""
from_bytes = from_str.encode('ascii')
to_bytes = to_str.encode('ascii')
data = read_file(os.path.join(workspace, src_file_path), binary_mode=True)
if from_bytes not in data and from_str not in src_file_path:
return None
dst_file_path = src_file_path.replace(from_str, to_str)
safe_file_dump(os.path.join(workspace, dst_file_path),
data.replace(from_bytes, to_bytes),
mode='wb')
if src_file_path != dst_file_path:
os.unlink(os.path.join(workspace, src_file_path))
return dst_file_path
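# Hedged example (workspace path and file name are made up): bumping a version
# string that appears both in a file's name and in its contents.
if __name__ == "__main__":
    workspace = "/tmp/reversion_demo"  # hypothetical directory holding 'pkg-1.0.0.txt'
    renamed = replace_in_file(workspace, "pkg-1.0.0.txt", "1.0.0", "1.0.1")
    print(renamed)  # 'pkg-1.0.1.txt' if the version string was found, otherwise None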
|
[
"def",
"replace_in_file",
"(",
"workspace",
",",
"src_file_path",
",",
"from_str",
",",
"to_str",
")",
":",
"from_bytes",
"=",
"from_str",
".",
"encode",
"(",
"'ascii'",
")",
"to_bytes",
"=",
"to_str",
".",
"encode",
"(",
"'ascii'",
")",
"data",
"=",
"read_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"workspace",
",",
"src_file_path",
")",
",",
"binary_mode",
"=",
"True",
")",
"if",
"from_bytes",
"not",
"in",
"data",
"and",
"from_str",
"not",
"in",
"src_file_path",
":",
"return",
"None",
"dst_file_path",
"=",
"src_file_path",
".",
"replace",
"(",
"from_str",
",",
"to_str",
")",
"safe_file_dump",
"(",
"os",
".",
"path",
".",
"join",
"(",
"workspace",
",",
"dst_file_path",
")",
",",
"data",
".",
"replace",
"(",
"from_bytes",
",",
"to_bytes",
")",
",",
"mode",
"=",
"'wb'",
")",
"if",
"src_file_path",
"!=",
"dst_file_path",
":",
"os",
".",
"unlink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"workspace",
",",
"src_file_path",
")",
")",
"return",
"dst_file_path"
] |
Replace from_str with to_str in the name and content of the given file.
If any edits were necessary, returns the new filename (which may be the same as the old filename).
|
[
"Replace",
"from_str",
"with",
"to_str",
"in",
"the",
"name",
"and",
"content",
"of",
"the",
"given",
"file",
"."
] |
python
|
train
|
stevearc/pyramid_webpack
|
pyramid_webpack/__init__.py
|
https://github.com/stevearc/pyramid_webpack/blob/4fcad26271fd6e8c270e19c7943240fea6d8c484/pyramid_webpack/__init__.py#L196-L209
|
def get_bundle(self, bundle_name, extensions=None):
""" Get all the chunks contained in a bundle """
if self.stats.get('status') == 'done':
bundle = self.stats.get('chunks', {}).get(bundle_name, None)
if bundle is None:
raise KeyError('No such bundle {0!r}.'.format(bundle_name))
test = self._chunk_filter(extensions)
return [self._add_url(c) for c in bundle if test(c)]
elif self.stats.get('status') == 'error':
raise RuntimeError("{error}: {message}".format(**self.stats))
else:
raise RuntimeError(
"Bad webpack stats file {0} status: {1!r}"
.format(self.state.stats_file, self.stats.get('status')))
|
[
"def",
"get_bundle",
"(",
"self",
",",
"bundle_name",
",",
"extensions",
"=",
"None",
")",
":",
"if",
"self",
".",
"stats",
".",
"get",
"(",
"'status'",
")",
"==",
"'done'",
":",
"bundle",
"=",
"self",
".",
"stats",
".",
"get",
"(",
"'chunks'",
",",
"{",
"}",
")",
".",
"get",
"(",
"bundle_name",
",",
"None",
")",
"if",
"bundle",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"'No such bundle {0!r}.'",
".",
"format",
"(",
"bundle_name",
")",
")",
"test",
"=",
"self",
".",
"_chunk_filter",
"(",
"extensions",
")",
"return",
"[",
"self",
".",
"_add_url",
"(",
"c",
")",
"for",
"c",
"in",
"bundle",
"if",
"test",
"(",
"c",
")",
"]",
"elif",
"self",
".",
"stats",
".",
"get",
"(",
"'status'",
")",
"==",
"'error'",
":",
"raise",
"RuntimeError",
"(",
"\"{error}: {message}\"",
".",
"format",
"(",
"*",
"*",
"self",
".",
"stats",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Bad webpack stats file {0} status: {1!r}\"",
".",
"format",
"(",
"self",
".",
"state",
".",
"stats_file",
",",
"self",
".",
"stats",
".",
"get",
"(",
"'status'",
")",
")",
")"
] |
Get all the chunks contained in a bundle
|
[
"Get",
"all",
"the",
"chunks",
"contained",
"in",
"a",
"bundle"
] |
python
|
train
|
materialsproject/pymatgen
|
pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py#L651-L660
|
def is_in_list(self, plane_list):
"""
Checks whether the plane is identical to one of the Planes in the plane_list list of Planes
:param plane_list: List of Planes to be compared to
:return: True if the plane is in the list, False otherwise
"""
for plane in plane_list:
if self.is_same_plane_as(plane):
return True
return False
|
[
"def",
"is_in_list",
"(",
"self",
",",
"plane_list",
")",
":",
"for",
"plane",
"in",
"plane_list",
":",
"if",
"self",
".",
"is_same_plane_as",
"(",
"plane",
")",
":",
"return",
"True",
"return",
"False"
] |
Checks whether the plane is identical to one of the Planes in the plane_list list of Planes
:param plane_list: List of Planes to be compared to
:return: True if the plane is in the list, False otherwise
|
[
"Checks",
"whether",
"the",
"plane",
"is",
"identical",
"to",
"one",
"of",
"the",
"Planes",
"in",
"the",
"plane_list",
"list",
"of",
"Planes",
":",
"param",
"plane_list",
":",
"List",
"of",
"Planes",
"to",
"be",
"compared",
"to",
":",
"return",
":",
"True",
"if",
"the",
"plane",
"is",
"in",
"the",
"list",
"False",
"otherwise"
] |
python
|
train
|
uchicago-cs/deepdish
|
deepdish/image.py
|
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/image.py#L95-L106
|
def crop(im, size):
"""
Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
Finally size after cropping.
"""
diff = [im.shape[index] - size[index] for index in (0, 1)]
im2 = im[diff[0]//2:diff[0]//2 + size[0], diff[1]//2:diff[1]//2 + size[1]]
return im2
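# Small illustration, not from deepdish itself: center-cropping a synthetic
# 100x80 RGB image down to 60x40 pixels.
if __name__ == "__main__":
    import numpy as np
    image = np.zeros((100, 80, 3), dtype=np.uint8)  # hypothetical input image
    cropped = crop(image, (60, 40))
    print(cropped.shape)  # (60, 40, 3)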
|
[
"def",
"crop",
"(",
"im",
",",
"size",
")",
":",
"diff",
"=",
"[",
"im",
".",
"shape",
"[",
"index",
"]",
"-",
"size",
"[",
"index",
"]",
"for",
"index",
"in",
"(",
"0",
",",
"1",
")",
"]",
"im2",
"=",
"im",
"[",
"diff",
"[",
"0",
"]",
"//",
"2",
":",
"diff",
"[",
"0",
"]",
"//",
"2",
"+",
"size",
"[",
"0",
"]",
",",
"diff",
"[",
"1",
"]",
"//",
"2",
":",
"diff",
"[",
"1",
"]",
"//",
"2",
"+",
"size",
"[",
"1",
"]",
"]",
"return",
"im2"
] |
Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
Finally size after cropping.
|
[
"Crops",
"an",
"image",
"in",
"the",
"center",
"."
] |
python
|
train
|
albertz/py_better_exchook
|
better_exchook.py
|
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L407-L425
|
def get_source_code(filename, lineno, module_globals):
"""
:param str filename:
:param int lineno:
:param dict[str] module_globals:
:return: source code of that line
:rtype: str
"""
import linecache
linecache.checkcache(filename)
source_code = linecache.getline(filename, lineno, module_globals)
# In case of a multi-line statement, lineno is usually the last line.
# We are checking for missing open brackets and add earlier code lines.
while is_source_code_missing_open_brackets(source_code):
if lineno <= 0:
break
lineno -= 1
source_code = "".join([linecache.getline(filename, lineno, module_globals), source_code])
return source_code
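# Hedged usage sketch: recovering the text of a (possibly multi-line) statement
# from the current file. The line number is illustrative only.
if __name__ == "__main__":
    print(get_source_code(__file__, 10, globals()))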
|
[
"def",
"get_source_code",
"(",
"filename",
",",
"lineno",
",",
"module_globals",
")",
":",
"import",
"linecache",
"linecache",
".",
"checkcache",
"(",
"filename",
")",
"source_code",
"=",
"linecache",
".",
"getline",
"(",
"filename",
",",
"lineno",
",",
"module_globals",
")",
"# In case of a multi-line statement, lineno is usually the last line.",
"# We are checking for missing open brackets and add earlier code lines.",
"while",
"is_source_code_missing_open_brackets",
"(",
"source_code",
")",
":",
"if",
"lineno",
"<=",
"0",
":",
"break",
"lineno",
"-=",
"1",
"source_code",
"=",
"\"\"",
".",
"join",
"(",
"[",
"linecache",
".",
"getline",
"(",
"filename",
",",
"lineno",
",",
"module_globals",
")",
",",
"source_code",
"]",
")",
"return",
"source_code"
] |
:param str filename:
:param int lineno:
:param dict[str] module_globals:
:return: source code of that line
:rtype: str
|
[
":",
"param",
"str",
"filename",
":",
":",
"param",
"int",
"lineno",
":",
":",
"param",
"dict",
"[",
"str",
"]",
"module_globals",
":",
":",
"return",
":",
"source",
"code",
"of",
"that",
"line",
":",
"rtype",
":",
"str"
] |
python
|
train
|
google/transitfeed
|
transitfeed/shapelib.py
|
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shapelib.py#L279-L297
|
def GetClosestPoint(self, p):
"""
Returns (closest_p, closest_i), where closest_p is the closest point
to p on the piecewise linear curve represented by the polyline,
and closest_i is the index of the point on the polyline just before
the polyline segment that contains closest_p.
"""
assert(len(self._points) > 0)
closest_point = self._points[0]
closest_i = 0
for i in range(0, len(self._points) - 1):
(a, b) = (self._points[i], self._points[i+1])
cur_closest_point = GetClosestPoint(p, a, b)
if p.Angle(cur_closest_point) < p.Angle(closest_point):
closest_point = cur_closest_point.Normalize()
closest_i = i
return (closest_point, closest_i)
|
[
"def",
"GetClosestPoint",
"(",
"self",
",",
"p",
")",
":",
"assert",
"(",
"len",
"(",
"self",
".",
"_points",
")",
">",
"0",
")",
"closest_point",
"=",
"self",
".",
"_points",
"[",
"0",
"]",
"closest_i",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_points",
")",
"-",
"1",
")",
":",
"(",
"a",
",",
"b",
")",
"=",
"(",
"self",
".",
"_points",
"[",
"i",
"]",
",",
"self",
".",
"_points",
"[",
"i",
"+",
"1",
"]",
")",
"cur_closest_point",
"=",
"GetClosestPoint",
"(",
"p",
",",
"a",
",",
"b",
")",
"if",
"p",
".",
"Angle",
"(",
"cur_closest_point",
")",
"<",
"p",
".",
"Angle",
"(",
"closest_point",
")",
":",
"closest_point",
"=",
"cur_closest_point",
".",
"Normalize",
"(",
")",
"closest_i",
"=",
"i",
"return",
"(",
"closest_point",
",",
"closest_i",
")"
] |
Returns (closest_p, closest_i), where closest_p is the closest point
to p on the piecewise linear curve represented by the polyline,
and closest_i is the index of the point on the polyline just before
the polyline segment that contains closest_p.
|
[
"Returns",
"(",
"closest_p",
"closest_i",
")",
"where",
"closest_p",
"is",
"the",
"closest",
"point",
"to",
"p",
"on",
"the",
"piecewise",
"linear",
"curve",
"represented",
"by",
"the",
"polyline",
"and",
"closest_i",
"is",
"the",
"index",
"of",
"the",
"point",
"on",
"the",
"polyline",
"just",
"before",
"the",
"polyline",
"segment",
"that",
"contains",
"closest_p",
"."
] |
python
|
train
|
yt-project/unyt
|
unyt/_parsing.py
|
https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/_parsing.py#L25-L68
|
def _auto_positive_symbol(tokens, local_dict, global_dict):
"""
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
"""
result = []
tokens.append((None, None)) # so zip traverses all tokens
for tok, nextTok in zip(tokens, tokens[1:]):
tokNum, tokVal = tok
nextTokNum, nextTokVal = nextTok
if tokNum == token.NAME:
name = tokVal
if name in global_dict:
obj = global_dict[name]
if isinstance(obj, (Basic, type)) or callable(obj):
result.append((token.NAME, name))
continue
# try to resolve known alternative unit name
try:
used_name = inv_name_alternatives[str(name)]
except KeyError:
# if we don't know this name it's a user-defined unit name
# so we should create a new symbol for it
used_name = str(name)
result.extend(
[
(token.NAME, "Symbol"),
(token.OP, "("),
(token.NAME, repr(used_name)),
(token.OP, ","),
(token.NAME, "positive"),
(token.OP, "="),
(token.NAME, "True"),
(token.OP, ")"),
]
)
else:
result.append((tokNum, tokVal))
return result
|
[
"def",
"_auto_positive_symbol",
"(",
"tokens",
",",
"local_dict",
",",
"global_dict",
")",
":",
"result",
"=",
"[",
"]",
"tokens",
".",
"append",
"(",
"(",
"None",
",",
"None",
")",
")",
"# so zip traverses all tokens",
"for",
"tok",
",",
"nextTok",
"in",
"zip",
"(",
"tokens",
",",
"tokens",
"[",
"1",
":",
"]",
")",
":",
"tokNum",
",",
"tokVal",
"=",
"tok",
"nextTokNum",
",",
"nextTokVal",
"=",
"nextTok",
"if",
"tokNum",
"==",
"token",
".",
"NAME",
":",
"name",
"=",
"tokVal",
"if",
"name",
"in",
"global_dict",
":",
"obj",
"=",
"global_dict",
"[",
"name",
"]",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"Basic",
",",
"type",
")",
")",
"or",
"callable",
"(",
"obj",
")",
":",
"result",
".",
"append",
"(",
"(",
"token",
".",
"NAME",
",",
"name",
")",
")",
"continue",
"# try to resolve known alternative unit name",
"try",
":",
"used_name",
"=",
"inv_name_alternatives",
"[",
"str",
"(",
"name",
")",
"]",
"except",
"KeyError",
":",
"# if we don't know this name it's a user-defined unit name",
"# so we should create a new symbol for it",
"used_name",
"=",
"str",
"(",
"name",
")",
"result",
".",
"extend",
"(",
"[",
"(",
"token",
".",
"NAME",
",",
"\"Symbol\"",
")",
",",
"(",
"token",
".",
"OP",
",",
"\"(\"",
")",
",",
"(",
"token",
".",
"NAME",
",",
"repr",
"(",
"used_name",
")",
")",
",",
"(",
"token",
".",
"OP",
",",
"\",\"",
")",
",",
"(",
"token",
".",
"NAME",
",",
"\"positive\"",
")",
",",
"(",
"token",
".",
"OP",
",",
"\"=\"",
")",
",",
"(",
"token",
".",
"NAME",
",",
"\"True\"",
")",
",",
"(",
"token",
".",
"OP",
",",
"\")\"",
")",
",",
"]",
")",
"else",
":",
"result",
".",
"append",
"(",
"(",
"tokNum",
",",
"tokVal",
")",
")",
"return",
"result"
] |
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
|
[
"Inserts",
"calls",
"to",
"Symbol",
"for",
"undefined",
"variables",
".",
"Passes",
"in",
"positive",
"=",
"True",
"as",
"a",
"keyword",
"argument",
".",
"Adapted",
"from",
"sympy",
".",
"sympy",
".",
"parsing",
".",
"sympy_parser",
".",
"auto_symbol"
] |
python
|
train
|
manolomartinez/greg
|
greg/classes.py
|
https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/classes.py#L202-L217
|
def will_tag(self):
"""
Check whether the feed should be tagged
"""
wanttags = self.retrieve_config('Tag', 'no')
if wanttags == 'yes':
if aux.staggerexists:
willtag = True
else:
willtag = False
print(("You want me to tag {0}, but you have not installed "
"the Stagger module. I cannot honour your request.").
format(self.name), file=sys.stderr, flush=True)
else:
willtag = False
return willtag
|
[
"def",
"will_tag",
"(",
"self",
")",
":",
"wanttags",
"=",
"self",
".",
"retrieve_config",
"(",
"'Tag'",
",",
"'no'",
")",
"if",
"wanttags",
"==",
"'yes'",
":",
"if",
"aux",
".",
"staggerexists",
":",
"willtag",
"=",
"True",
"else",
":",
"willtag",
"=",
"False",
"print",
"(",
"(",
"\"You want me to tag {0}, but you have not installed \"",
"\"the Stagger module. I cannot honour your request.\"",
")",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
",",
"flush",
"=",
"True",
")",
"else",
":",
"willtag",
"=",
"False",
"return",
"willtag"
] |
Check whether the feed should be tagged
|
[
"Check",
"whether",
"the",
"feed",
"should",
"be",
"tagged"
] |
python
|
train
|
kiwiz/gkeepapi
|
gkeepapi/node.py
|
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/node.py#L1140-L1151
|
def remove(self, node, dirty=True):
"""Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
"""
if node.id in self._children:
self._children[node.id].parent = None
del self._children[node.id]
if dirty:
self.touch()
|
[
"def",
"remove",
"(",
"self",
",",
"node",
",",
"dirty",
"=",
"True",
")",
":",
"if",
"node",
".",
"id",
"in",
"self",
".",
"_children",
":",
"self",
".",
"_children",
"[",
"node",
".",
"id",
"]",
".",
"parent",
"=",
"None",
"del",
"self",
".",
"_children",
"[",
"node",
".",
"id",
"]",
"if",
"dirty",
":",
"self",
".",
"touch",
"(",
")"
] |
Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
|
[
"Remove",
"the",
"given",
"child",
"node",
"."
] |
python
|
train
|
DataDog/integrations-core
|
kubelet/datadog_checks/kubelet/common.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubelet/datadog_checks/kubelet/common.py#L41-L54
|
def get_pod_by_uid(uid, podlist):
"""
Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found
"""
for pod in podlist.get("items", []):
try:
if pod["metadata"]["uid"] == uid:
return pod
except KeyError:
continue
return None
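# Minimal illustration with a hand-built podlist dict; the structure mirrors the
# kubelet pod list payload, but the values are made up.
if __name__ == "__main__":
    podlist = {"items": [{"metadata": {"uid": "abc-123", "name": "web-0"}}]}
    pod = get_pod_by_uid("abc-123", podlist)
    print(pod["metadata"]["name"] if pod else "not found")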
|
[
"def",
"get_pod_by_uid",
"(",
"uid",
",",
"podlist",
")",
":",
"for",
"pod",
"in",
"podlist",
".",
"get",
"(",
"\"items\"",
",",
"[",
"]",
")",
":",
"try",
":",
"if",
"pod",
"[",
"\"metadata\"",
"]",
"[",
"\"uid\"",
"]",
"==",
"uid",
":",
"return",
"pod",
"except",
"KeyError",
":",
"continue",
"return",
"None"
] |
Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found
|
[
"Searches",
"for",
"a",
"pod",
"uid",
"in",
"the",
"podlist",
"and",
"returns",
"the",
"pod",
"if",
"found",
":",
"param",
"uid",
":",
"pod",
"uid",
":",
"param",
"podlist",
":",
"podlist",
"dict",
"object",
":",
"return",
":",
"pod",
"dict",
"object",
"if",
"found",
"None",
"if",
"not",
"found"
] |
python
|
train
|
mozilla/mozdownload
|
mozdownload/parser.py
|
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L81-L89
|
def handle_data(self, data):
"""Callback when the data of a tag has been collected."""
# Only process the data when we are in an active a tag and have an URL.
if not self.active_url:
return
# The visible text can have a final slash so strip it off
if data.strip('/') == self.active_url:
self.entries.append(self.active_url)
|
[
"def",
"handle_data",
"(",
"self",
",",
"data",
")",
":",
"# Only process the data when we are in an active a tag and have an URL.",
"if",
"not",
"self",
".",
"active_url",
":",
"return",
"# The visible text can have a final slash so strip it off",
"if",
"data",
".",
"strip",
"(",
"'/'",
")",
"==",
"self",
".",
"active_url",
":",
"self",
".",
"entries",
".",
"append",
"(",
"self",
".",
"active_url",
")"
] |
Callback when the data of a tag has been collected.
|
[
"Callback",
"when",
"the",
"data",
"of",
"a",
"tag",
"has",
"been",
"collected",
"."
] |
python
|
train
|
woolfson-group/isambard
|
isambard/ampal/assembly.py
|
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/assembly.py#L686-L690
|
def repack_all(self):
"""Repacks the side chains of all Polymers in the Assembly."""
non_na_sequences = [s for s in self.sequences if ' ' not in s]
self.pack_new_sequences(non_na_sequences)
return
|
[
"def",
"repack_all",
"(",
"self",
")",
":",
"non_na_sequences",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"sequences",
"if",
"' '",
"not",
"in",
"s",
"]",
"self",
".",
"pack_new_sequences",
"(",
"non_na_sequences",
")",
"return"
] |
Repacks the side chains of all Polymers in the Assembly.
|
[
"Repacks",
"the",
"side",
"chains",
"of",
"all",
"Polymers",
"in",
"the",
"Assembly",
"."
] |
python
|
train
|
PyFilesystem/pyfilesystem2
|
fs/multifs.py
|
https://github.com/PyFilesystem/pyfilesystem2/blob/047f3593f297d1442194cda3da7a7335bcc9c14a/fs/multifs.py#L156-L163
|
def _delegate_required(self, path):
# type: (Text) -> FS
"""Check that there is a filesystem with the given ``path``.
"""
fs = self._delegate(path)
if fs is None:
raise errors.ResourceNotFound(path)
return fs
|
[
"def",
"_delegate_required",
"(",
"self",
",",
"path",
")",
":",
"# type: (Text) -> FS",
"fs",
"=",
"self",
".",
"_delegate",
"(",
"path",
")",
"if",
"fs",
"is",
"None",
":",
"raise",
"errors",
".",
"ResourceNotFound",
"(",
"path",
")",
"return",
"fs"
] |
Check that there is a filesystem with the given ``path``.
|
[
"Check",
"that",
"there",
"is",
"a",
"filesystem",
"with",
"the",
"given",
"path",
"."
] |
python
|
train
|
pirate/mesh-networking
|
mesh/programs.py
|
https://github.com/pirate/mesh-networking/blob/e8da35d2ecded6930cf2180605bf28479ee555c7/mesh/programs.py#L21-L28
|
def run(self):
"""runloop that reads packets off the node's incoming packet buffer (node.inq)"""
while self.keep_listening:
for interface in self.node.interfaces:
try:
self.recv(self.node.inq[interface].get(timeout=0), interface)
except Empty:
sleep(0.01)
|
[
"def",
"run",
"(",
"self",
")",
":",
"while",
"self",
".",
"keep_listening",
":",
"for",
"interface",
"in",
"self",
".",
"node",
".",
"interfaces",
":",
"try",
":",
"self",
".",
"recv",
"(",
"self",
".",
"node",
".",
"inq",
"[",
"interface",
"]",
".",
"get",
"(",
"timeout",
"=",
"0",
")",
",",
"interface",
")",
"except",
"Empty",
":",
"sleep",
"(",
"0.01",
")"
] |
runloop that reads packets off the node's incoming packet buffer (node.inq)
|
[
"runloop",
"that",
"reads",
"packets",
"off",
"the",
"node",
"s",
"incoming",
"packet",
"buffer",
"(",
"node",
".",
"inq",
")"
] |
python
|
train
|
vanheeringen-lab/gimmemotifs
|
gimmemotifs/tools.py
|
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1266-L1299
|
def parse(self, fo):
"""
Convert MotifSampler output to motifs
Parameters
----------
fo : file-like
File object containing MotifSampler output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
pwm = []
info = {}
for line in fo.readlines():
if line.startswith("#"):
vals = line.strip()[1:].split(" = ")
if len(vals) > 1:
info[vals[0]] = vals[1]
elif len(line) > 1:
pwm.append([float(x) for x in line.strip().split("\t")])
else:
motifs.append(Motif())
motifs[-1].consensus = info["Consensus"]
motifs[-1].width = info["W"]
motifs[-1].id = info["ID"]
motifs[-1].pwm = pwm[:]
pwm = []
return motifs
|
[
"def",
"parse",
"(",
"self",
",",
"fo",
")",
":",
"motifs",
"=",
"[",
"]",
"pwm",
"=",
"[",
"]",
"info",
"=",
"{",
"}",
"for",
"line",
"in",
"fo",
".",
"readlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"vals",
"=",
"line",
".",
"strip",
"(",
")",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\" = \"",
")",
"if",
"len",
"(",
"vals",
")",
">",
"1",
":",
"info",
"[",
"vals",
"[",
"0",
"]",
"]",
"=",
"vals",
"[",
"1",
"]",
"elif",
"len",
"(",
"line",
")",
">",
"1",
":",
"pwm",
".",
"append",
"(",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"]",
")",
"else",
":",
"motifs",
".",
"append",
"(",
"Motif",
"(",
")",
")",
"motifs",
"[",
"-",
"1",
"]",
".",
"consensus",
"=",
"info",
"[",
"\"Consensus\"",
"]",
"motifs",
"[",
"-",
"1",
"]",
".",
"width",
"=",
"info",
"[",
"\"W\"",
"]",
"motifs",
"[",
"-",
"1",
"]",
".",
"id",
"=",
"info",
"[",
"\"ID\"",
"]",
"motifs",
"[",
"-",
"1",
"]",
".",
"pwm",
"=",
"pwm",
"[",
":",
"]",
"pwm",
"=",
"[",
"]",
"return",
"motifs"
] |
Convert MotifSampler output to motifs
Parameters
----------
fo : file-like
File object containing MotifSampler output.
Returns
-------
motifs : list
List of Motif instances.
|
[
"Convert",
"MotifSampler",
"output",
"to",
"motifs",
"Parameters",
"----------",
"fo",
":",
"file",
"-",
"like",
"File",
"object",
"containing",
"MotifSampler",
"output",
"."
] |
python
|
train
|
basho/riak-python-client
|
riak/transports/http/transport.py
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/transport.py#L225-L238
|
def get_buckets(self, bucket_type=None, timeout=None):
"""
Fetch a list of all buckets
"""
bucket_type = self._get_bucket_type(bucket_type)
url = self.bucket_list_path(bucket_type=bucket_type,
timeout=timeout)
status, headers, body = self._request('GET', url)
if status == 200:
props = json.loads(bytes_to_str(body))
return props['buckets']
else:
raise RiakError('Error getting buckets.')
|
[
"def",
"get_buckets",
"(",
"self",
",",
"bucket_type",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"bucket_type",
"=",
"self",
".",
"_get_bucket_type",
"(",
"bucket_type",
")",
"url",
"=",
"self",
".",
"bucket_list_path",
"(",
"bucket_type",
"=",
"bucket_type",
",",
"timeout",
"=",
"timeout",
")",
"status",
",",
"headers",
",",
"body",
"=",
"self",
".",
"_request",
"(",
"'GET'",
",",
"url",
")",
"if",
"status",
"==",
"200",
":",
"props",
"=",
"json",
".",
"loads",
"(",
"bytes_to_str",
"(",
"body",
")",
")",
"return",
"props",
"[",
"'buckets'",
"]",
"else",
":",
"raise",
"RiakError",
"(",
"'Error getting buckets.'",
")"
] |
Fetch a list of all buckets
|
[
"Fetch",
"a",
"list",
"of",
"all",
"buckets"
] |
python
|
train
|
mila-iqia/fuel
|
fuel/converters/youtube_audio.py
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/youtube_audio.py#L11-L62
|
def convert_youtube_audio(directory, output_directory, youtube_id, channels,
sample, output_filename=None):
"""Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
"""
input_file = os.path.join(directory, '{}.m4a'.format(youtube_id))
wav_filename = '{}.wav'.format(youtube_id)
wav_file = os.path.join(directory, wav_filename)
ffmpeg_not_available = subprocess.call(['ffmpeg', '-version'])
if ffmpeg_not_available:
raise RuntimeError('conversion requires ffmpeg')
subprocess.check_call(['ffmpeg', '-y', '-i', input_file, '-ac',
str(channels), '-ar', str(sample), wav_file],
stdout=sys.stdout)
# Load WAV into array
_, data = scipy.io.wavfile.read(wav_file)
if data.ndim == 1:
data = data[:, None]
data = data[None, :]
# Store in HDF5
if output_filename is None:
output_filename = '{}.hdf5'.format(youtube_id)
output_file = os.path.join(output_directory, output_filename)
with h5py.File(output_file, 'w') as h5file:
fill_hdf5_file(h5file, (('train', 'features', data),))
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'time'
h5file['features'].dims[2].label = 'feature'
return (output_file,)
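# Hedged example call (directories and the video id are placeholders): converting a
# previously downloaded '<youtube_id>.m4a' file into a mono 16 kHz HDF5 dataset.
# Requires ffmpeg on the PATH, as noted in the docstring.
if __name__ == "__main__":
    files = convert_youtube_audio(directory="/tmp/youtube",
                                  output_directory="/tmp/youtube",
                                  youtube_id="XXXXXXXXXXX",  # hypothetical 11-character id
                                  channels=1,
                                  sample=16000)
    print(files)  # ('/tmp/youtube/XXXXXXXXXXX.hdf5',)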
|
[
"def",
"convert_youtube_audio",
"(",
"directory",
",",
"output_directory",
",",
"youtube_id",
",",
"channels",
",",
"sample",
",",
"output_filename",
"=",
"None",
")",
":",
"input_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'{}.m4a'",
".",
"format",
"(",
"youtube_id",
")",
")",
"wav_filename",
"=",
"'{}.wav'",
".",
"format",
"(",
"youtube_id",
")",
"wav_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"wav_filename",
")",
"ffmpeg_not_available",
"=",
"subprocess",
".",
"call",
"(",
"[",
"'ffmpeg'",
",",
"'-version'",
"]",
")",
"if",
"ffmpeg_not_available",
":",
"raise",
"RuntimeError",
"(",
"'conversion requires ffmpeg'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'ffmpeg'",
",",
"'-y'",
",",
"'-i'",
",",
"input_file",
",",
"'-ac'",
",",
"str",
"(",
"channels",
")",
",",
"'-ar'",
",",
"str",
"(",
"sample",
")",
",",
"wav_file",
"]",
",",
"stdout",
"=",
"sys",
".",
"stdout",
")",
"# Load WAV into array",
"_",
",",
"data",
"=",
"scipy",
".",
"io",
".",
"wavfile",
".",
"read",
"(",
"wav_file",
")",
"if",
"data",
".",
"ndim",
"==",
"1",
":",
"data",
"=",
"data",
"[",
":",
",",
"None",
"]",
"data",
"=",
"data",
"[",
"None",
",",
":",
"]",
"# Store in HDF5",
"if",
"output_filename",
"is",
"None",
":",
"output_filename",
"=",
"'{}.hdf5'",
".",
"format",
"(",
"youtube_id",
")",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"output_filename",
")",
"with",
"h5py",
".",
"File",
"(",
"output_file",
",",
"'w'",
")",
"as",
"h5file",
":",
"fill_hdf5_file",
"(",
"h5file",
",",
"(",
"(",
"'train'",
",",
"'features'",
",",
"data",
")",
",",
")",
")",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"0",
"]",
".",
"label",
"=",
"'batch'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"1",
"]",
".",
"label",
"=",
"'time'",
"h5file",
"[",
"'features'",
"]",
".",
"dims",
"[",
"2",
"]",
".",
"label",
"=",
"'feature'",
"return",
"(",
"output_file",
",",
")"
] |
Converts downloaded YouTube audio to HDF5 format.
Requires `ffmpeg` to be installed and available on the command line
(i.e. available on your `PATH`).
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
youtube_id : str
11-character video ID (taken from YouTube URL)
channels : int
The number of audio channels to use in the PCM Wave file.
sample : int
The sampling rate to use in Hz, e.g. 44100 or 16000.
output_filename : str, optional
Name of the saved dataset. If `None` (the default),
`youtube_id.hdf5` is used.
|
[
"Converts",
"downloaded",
"YouTube",
"audio",
"to",
"HDF5",
"format",
"."
] |
python
|
train
|
google/mobly
|
mobly/controllers/android_device.py
|
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device.py#L164-L183
|
def parse_device_list(device_list_str, key):
"""Parses a byte string representing a list of devices.
The string is generated by calling either adb or fastboot. The tokens in
each string is tab-separated.
Args:
device_list_str: Output of adb or fastboot.
key: The token that signifies a device in device_list_str.
Returns:
A list of android device serial numbers.
"""
clean_lines = new_str(device_list_str, 'utf-8').strip().split('\n')
results = []
for line in clean_lines:
tokens = line.strip().split('\t')
if len(tokens) == 2 and tokens[1] == key:
results.append(tokens[0])
return results
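# Illustrative call; the sample bytes mimic 'adb devices' output and are fabricated.
if __name__ == "__main__":
    adb_output = b"List of devices attached\nSERIAL1\tdevice\nSERIAL2\tdevice\n"
    print(parse_device_list(adb_output, "device"))  # ['SERIAL1', 'SERIAL2']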
|
[
"def",
"parse_device_list",
"(",
"device_list_str",
",",
"key",
")",
":",
"clean_lines",
"=",
"new_str",
"(",
"device_list_str",
",",
"'utf-8'",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"results",
"=",
"[",
"]",
"for",
"line",
"in",
"clean_lines",
":",
"tokens",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"tokens",
")",
"==",
"2",
"and",
"tokens",
"[",
"1",
"]",
"==",
"key",
":",
"results",
".",
"append",
"(",
"tokens",
"[",
"0",
"]",
")",
"return",
"results"
] |
Parses a byte string representing a list of devices.
The string is generated by calling either adb or fastboot. The tokens in
each string is tab-separated.
Args:
device_list_str: Output of adb or fastboot.
key: The token that signifies a device in device_list_str.
Returns:
A list of android device serial numbers.
|
[
"Parses",
"a",
"byte",
"string",
"representing",
"a",
"list",
"of",
"devices",
"."
] |
python
|
train
|
scanny/python-pptx
|
pptx/oxml/chart/datalabel.py
|
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/datalabel.py#L95-L103
|
def remove_tx_rich(self):
"""
Remove any `c:tx[c:rich]` child, or do nothing if not present.
"""
matches = self.xpath('c:tx[c:rich]')
if not matches:
return
tx = matches[0]
self.remove(tx)
|
[
"def",
"remove_tx_rich",
"(",
"self",
")",
":",
"matches",
"=",
"self",
".",
"xpath",
"(",
"'c:tx[c:rich]'",
")",
"if",
"not",
"matches",
":",
"return",
"tx",
"=",
"matches",
"[",
"0",
"]",
"self",
".",
"remove",
"(",
"tx",
")"
] |
Remove any `c:tx[c:rich]` child, or do nothing if not present.
|
[
"Remove",
"any",
"c",
":",
"tx",
"[",
"c",
":",
"rich",
"]",
"child",
"or",
"do",
"nothing",
"if",
"not",
"present",
"."
] |
python
|
train
|
CityOfZion/neo-python
|
neo/Core/State/StorageItem.py
|
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/StorageItem.py#L79-L87
|
def Serialize(self, writer):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
super(StorageItem, self).Serialize(writer)
writer.WriteVarBytes(self.Value)
|
[
"def",
"Serialize",
"(",
"self",
",",
"writer",
")",
":",
"super",
"(",
"StorageItem",
",",
"self",
")",
".",
"Serialize",
"(",
"writer",
")",
"writer",
".",
"WriteVarBytes",
"(",
"self",
".",
"Value",
")"
] |
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
[
"Serialize",
"full",
"object",
"."
] |
python
|
train
|
googleapis/google-cloud-python
|
bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery_storage/google/cloud/bigquery_storage_v1beta1/reader.py#L327-L332
|
def next(self):
"""Get the next row in the page."""
self._parse_block()
if self._remaining > 0:
self._remaining -= 1
return six.next(self._iter_rows)
|
[
"def",
"next",
"(",
"self",
")",
":",
"self",
".",
"_parse_block",
"(",
")",
"if",
"self",
".",
"_remaining",
">",
"0",
":",
"self",
".",
"_remaining",
"-=",
"1",
"return",
"six",
".",
"next",
"(",
"self",
".",
"_iter_rows",
")"
] |
Get the next row in the page.
|
[
"Get",
"the",
"next",
"row",
"in",
"the",
"page",
"."
] |
python
|
train
|
Julius2342/pyvlx
|
pyvlx/opening_device.py
|
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/opening_device.py#L24-L41
|
async def set_position(self, position, wait_for_completion=True):
"""Set window to desired position.
Parameters:
* position: Position object containing the target position.
* wait_for_completion: If set, function will return
after device has reached target position.
"""
command_send = CommandSend(
pyvlx=self.pyvlx,
wait_for_completion=wait_for_completion,
node_id=self.node_id,
parameter=position)
await command_send.do_api_call()
if not command_send.success:
raise PyVLXException("Unable to send command")
await self.after_update()
|
[
"async",
"def",
"set_position",
"(",
"self",
",",
"position",
",",
"wait_for_completion",
"=",
"True",
")",
":",
"command_send",
"=",
"CommandSend",
"(",
"pyvlx",
"=",
"self",
".",
"pyvlx",
",",
"wait_for_completion",
"=",
"wait_for_completion",
",",
"node_id",
"=",
"self",
".",
"node_id",
",",
"parameter",
"=",
"position",
")",
"await",
"command_send",
".",
"do_api_call",
"(",
")",
"if",
"not",
"command_send",
".",
"success",
":",
"raise",
"PyVLXException",
"(",
"\"Unable to send command\"",
")",
"await",
"self",
".",
"after_update",
"(",
")"
] |
Set window to desired position.
Parameters:
* position: Position object containing the target position.
* wait_for_completion: If set, function will return
after device has reached target position.
|
[
"Set",
"window",
"to",
"desired",
"position",
"."
] |
python
|
train
|
fermiPy/fermipy
|
fermipy/jobs/slac_impl.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/slac_impl.py#L128-L174
|
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
"""Send a single job to the LSF batch
Parameters
----------
link : `fermipy.jobs.chain.Link`
The link used to invoke the command we are running
key : str
A string that identifies this particular instance of the job
job_config : dict
A dictionrary with the arguments for the job. Used with
the self._command_template job template
logfile : str
The logfile for this job, may be used to check for success/ failure
"""
full_sub_dict = job_config.copy()
if self._no_batch:
full_command = "%s >& %s" % (
link.command_template().format(**full_sub_dict), logfile)
else:
full_sub_dict['logfile'] = logfile
full_command_template = build_bsub_command(
link.command_template(), self._lsf_args)
full_command = full_command_template.format(**full_sub_dict)
logdir = os.path.dirname(logfile)
print_bsub = True
if self._dry_run:
if print_bsub:
stream.write("%s\n" % full_command)
return 0
try:
os.makedirs(logdir)
except OSError:
pass
proc = subprocess.Popen(full_command.split(),
stderr=stream,
stdout=stream)
proc.communicate()
return proc.returncode
|
[
"def",
"dispatch_job_hook",
"(",
"self",
",",
"link",
",",
"key",
",",
"job_config",
",",
"logfile",
",",
"stream",
"=",
"sys",
".",
"stdout",
")",
":",
"full_sub_dict",
"=",
"job_config",
".",
"copy",
"(",
")",
"if",
"self",
".",
"_no_batch",
":",
"full_command",
"=",
"\"%s >& %s\"",
"%",
"(",
"link",
".",
"command_template",
"(",
")",
".",
"format",
"(",
"*",
"*",
"full_sub_dict",
")",
",",
"logfile",
")",
"else",
":",
"full_sub_dict",
"[",
"'logfile'",
"]",
"=",
"logfile",
"full_command_template",
"=",
"build_bsub_command",
"(",
"link",
".",
"command_template",
"(",
")",
",",
"self",
".",
"_lsf_args",
")",
"full_command",
"=",
"full_command_template",
".",
"format",
"(",
"*",
"*",
"full_sub_dict",
")",
"logdir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"logfile",
")",
"print_bsub",
"=",
"True",
"if",
"self",
".",
"_dry_run",
":",
"if",
"print_bsub",
":",
"stream",
".",
"write",
"(",
"\"%s\\n\"",
"%",
"full_command",
")",
"return",
"0",
"try",
":",
"os",
".",
"makedirs",
"(",
"logdir",
")",
"except",
"OSError",
":",
"pass",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"full_command",
".",
"split",
"(",
")",
",",
"stderr",
"=",
"stream",
",",
"stdout",
"=",
"stream",
")",
"proc",
".",
"communicate",
"(",
")",
"return",
"proc",
".",
"returncode"
] |
Send a single job to the LSF batch
Parameters
----------
link : `fermipy.jobs.chain.Link`
The link used to invoke the command we are running
key : str
A string that identifies this particular instance of the job
job_config : dict
A dictionrary with the arguments for the job. Used with
the self._command_template job template
logfile : str
The logfile for this job, may be used to check for success/ failure
|
[
"Send",
"a",
"single",
"job",
"to",
"the",
"LSF",
"batch"
] |
python
|
train
|
saltstack/salt
|
salt/modules/boto_ec2.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_ec2.py#L1626-L1694
|
def modify_network_interface_attribute(
name=None, network_interface_id=None, attr=None,
value=None, region=None, key=None, keyid=None, profile=None):
'''
Modify an attribute of an Elastic Network Interface.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.modify_network_interface_attribute my_eni attr=description value='example description'
'''
if not (name or network_interface_id):
raise SaltInvocationError(
'Either name or network_interface_id must be provided.'
)
if attr is None and value is None:
raise SaltInvocationError(
'attr and value must be provided.'
)
r = {}
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
result = _get_network_interface(conn, name, network_interface_id)
if 'error' in result:
return result
eni = result['result']
info = _describe_network_interface(eni)
network_interface_id = info['id']
# munge attr into what the API requires
if attr == 'groups':
_attr = 'groupSet'
elif attr == 'source_dest_check':
_attr = 'sourceDestCheck'
elif attr == 'delete_on_termination':
_attr = 'deleteOnTermination'
else:
_attr = attr
_value = value
if info.get('vpc_id') and _attr == 'groupSet':
_value = __salt__['boto_secgroup.convert_to_group_ids'](
value, vpc_id=info.get('vpc_id'), region=region, key=key,
keyid=keyid, profile=profile
)
if not _value:
r['error'] = {
'message': ('Security groups do not map to valid security'
' group ids')
}
return r
_attachment_id = None
if _attr == 'deleteOnTermination':
try:
_attachment_id = info['attachment']['id']
except KeyError:
r['error'] = {
'message': ('No attachment id found for this ENI. The ENI must'
' be attached before delete_on_termination can be'
' modified')
}
return r
try:
r['result'] = conn.modify_network_interface_attribute(
network_interface_id, _attr, _value, attachment_id=_attachment_id
)
except boto.exception.EC2ResponseError as e:
r['error'] = __utils__['boto.get_error'](e)
return r
|
[
"def",
"modify_network_interface_attribute",
"(",
"name",
"=",
"None",
",",
"network_interface_id",
"=",
"None",
",",
"attr",
"=",
"None",
",",
"value",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"if",
"not",
"(",
"name",
"or",
"network_interface_id",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'Either name or network_interface_id must be provided.'",
")",
"if",
"attr",
"is",
"None",
"and",
"value",
"is",
"None",
":",
"raise",
"SaltInvocationError",
"(",
"'attr and value must be provided.'",
")",
"r",
"=",
"{",
"}",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"result",
"=",
"_get_network_interface",
"(",
"conn",
",",
"name",
",",
"network_interface_id",
")",
"if",
"'error'",
"in",
"result",
":",
"return",
"result",
"eni",
"=",
"result",
"[",
"'result'",
"]",
"info",
"=",
"_describe_network_interface",
"(",
"eni",
")",
"network_interface_id",
"=",
"info",
"[",
"'id'",
"]",
"# munge attr into what the API requires",
"if",
"attr",
"==",
"'groups'",
":",
"_attr",
"=",
"'groupSet'",
"elif",
"attr",
"==",
"'source_dest_check'",
":",
"_attr",
"=",
"'sourceDestCheck'",
"elif",
"attr",
"==",
"'delete_on_termination'",
":",
"_attr",
"=",
"'deleteOnTermination'",
"else",
":",
"_attr",
"=",
"attr",
"_value",
"=",
"value",
"if",
"info",
".",
"get",
"(",
"'vpc_id'",
")",
"and",
"_attr",
"==",
"'groupSet'",
":",
"_value",
"=",
"__salt__",
"[",
"'boto_secgroup.convert_to_group_ids'",
"]",
"(",
"value",
",",
"vpc_id",
"=",
"info",
".",
"get",
"(",
"'vpc_id'",
")",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"_value",
":",
"r",
"[",
"'error'",
"]",
"=",
"{",
"'message'",
":",
"(",
"'Security groups do not map to valid security'",
"' group ids'",
")",
"}",
"return",
"r",
"_attachment_id",
"=",
"None",
"if",
"_attr",
"==",
"'deleteOnTermination'",
":",
"try",
":",
"_attachment_id",
"=",
"info",
"[",
"'attachment'",
"]",
"[",
"'id'",
"]",
"except",
"KeyError",
":",
"r",
"[",
"'error'",
"]",
"=",
"{",
"'message'",
":",
"(",
"'No attachment id found for this ENI. The ENI must'",
"' be attached before delete_on_termination can be'",
"' modified'",
")",
"}",
"return",
"r",
"try",
":",
"r",
"[",
"'result'",
"]",
"=",
"conn",
".",
"modify_network_interface_attribute",
"(",
"network_interface_id",
",",
"_attr",
",",
"_value",
",",
"attachment_id",
"=",
"_attachment_id",
")",
"except",
"boto",
".",
"exception",
".",
"EC2ResponseError",
"as",
"e",
":",
"r",
"[",
"'error'",
"]",
"=",
"__utils__",
"[",
"'boto.get_error'",
"]",
"(",
"e",
")",
"return",
"r"
] |
Modify an attribute of an Elastic Network Interface.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.modify_network_interface_attribute my_eni attr=description value='example description'
|
[
"Modify",
"an",
"attribute",
"of",
"an",
"Elastic",
"Network",
"Interface",
"."
] |
python
|
train
|
tisimst/pyDOE
|
pyDOE/var_regression_matrix.py
|
https://github.com/tisimst/pyDOE/blob/436143702507a5c8ff87b361223eee8171d6a1d7/pyDOE/var_regression_matrix.py#L18-L51
|
def var_regression_matrix(H, x, model, sigma=1):
"""
Compute the variance of the 'regression error'.
Parameters
----------
H : 2d-array
The regression matrix
x : 2d-array
The coordinates to calculate the regression error variance at.
model : str
A string of tokens that define the regression model (e.g.
'1 x1 x2 x1*x2')
sigma : scalar
An estimate of the variance (default: 1).
Returns
-------
var : scalar
The variance of the regression error, evaluated at ``x``.
"""
x = np.atleast_2d(x)
H = np.atleast_2d(H)
if x.shape[0]==1:
x = x.T
if np.rank(H)<(np.dot(H.T, H)).shape[0]:
raise ValueError("model and DOE don't suit together")
x_mod = build_regression_matrix(x, model)
var = sigma**2*np.dot(np.dot(x_mod.T, np.linalg.inv(np.dot(H.T, H))), x_mod)
return var
|
[
"def",
"var_regression_matrix",
"(",
"H",
",",
"x",
",",
"model",
",",
"sigma",
"=",
"1",
")",
":",
"x",
"=",
"np",
".",
"atleast_2d",
"(",
"x",
")",
"H",
"=",
"np",
".",
"atleast_2d",
"(",
"H",
")",
"if",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"x",
"=",
"x",
".",
"T",
"if",
"np",
".",
"rank",
"(",
"H",
")",
"<",
"(",
"np",
".",
"dot",
"(",
"H",
".",
"T",
",",
"H",
")",
")",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"\"model and DOE don't suit together\"",
")",
"x_mod",
"=",
"build_regression_matrix",
"(",
"x",
",",
"model",
")",
"var",
"=",
"sigma",
"**",
"2",
"*",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"x_mod",
".",
"T",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"np",
".",
"dot",
"(",
"H",
".",
"T",
",",
"H",
")",
")",
")",
",",
"x_mod",
")",
"return",
"var"
] |
Compute the variance of the 'regression error'.
Parameters
----------
H : 2d-array
The regression matrix
x : 2d-array
The coordinates to calculate the regression error variance at.
model : str
A string of tokens that define the regression model (e.g.
'1 x1 x2 x1*x2')
sigma : scalar
An estimate of the variance (default: 1).
Returns
-------
var : scalar
The variance of the regression error, evaluated at ``x``.
|
[
"Compute",
"the",
"variance",
"of",
"the",
"regression",
"error",
".",
"Parameters",
"----------",
"H",
":",
"2d",
"-",
"array",
"The",
"regression",
"matrix",
"x",
":",
"2d",
"-",
"array",
"The",
"coordinates",
"to",
"calculate",
"the",
"regression",
"error",
"variance",
"at",
".",
"model",
":",
"str",
"A",
"string",
"of",
"tokens",
"that",
"define",
"the",
"regression",
"model",
"(",
"e",
".",
"g",
".",
"1",
"x1",
"x2",
"x1",
"*",
"x2",
")",
"sigma",
":",
"scalar",
"An",
"estimate",
"of",
"the",
"variance",
"(",
"default",
":",
"1",
")",
".",
"Returns",
"-------",
"var",
":",
"scalar",
"The",
"variance",
"of",
"the",
"regression",
"error",
"evaluated",
"at",
"x",
"."
] |
python
|
train
|
cs50/check50
|
check50/py.py
|
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/py.py#L10-L34
|
def append_code(original, codefile):
"""Append the contents of one file to another.
:param original: name of file that will be appended to
:type original: str
:param codefile: name of file that will be appende
:type codefile: str
This function is particularly useful when one wants to replace a function
in student code with their own implementation of one. If two functions are
defined with the same name in Python, the latter definition is taken so overwriting
a function is as simple as writing it to a file and then appending it to the
student's code.
Example usage::
# Include a file containing our own implementation of a lookup function.
check50.include("lookup.py")
# Overwrite the lookup function in helpers.py with our own implementation.
check50.py.append_code("helpers.py", "lookup.py")
"""
with open(codefile) as code, open(original, "a") as o:
o.write("\n")
o.writelines(code)
|
[
"def",
"append_code",
"(",
"original",
",",
"codefile",
")",
":",
"with",
"open",
"(",
"codefile",
")",
"as",
"code",
",",
"open",
"(",
"original",
",",
"\"a\"",
")",
"as",
"o",
":",
"o",
".",
"write",
"(",
"\"\\n\"",
")",
"o",
".",
"writelines",
"(",
"code",
")"
] |
Append the contents of one file to another.
:param original: name of file that will be appended to
:type original: str
:param codefile: name of file that will be appende
:type codefile: str
This function is particularly useful when one wants to replace a function
in student code with their own implementation of one. If two functions are
defined with the same name in Python, the latter definition is taken so overwriting
a function is as simple as writing it to a file and then appending it to the
student's code.
Example usage::
# Include a file containing our own implementation of a lookup function.
check50.include("lookup.py")
# Overwrite the lookup function in helpers.py with our own implementation.
check50.py.append_code("helpers.py", "lookup.py")
|
[
"Append",
"the",
"contents",
"of",
"one",
"file",
"to",
"another",
"."
] |
python
|
train
|
wtolson/gnsq
|
gnsq/httpclient.py
|
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/httpclient.py#L28-L40
|
def from_url(cls, url, **kwargs):
"""Create a client from a url."""
url = urllib3.util.parse_url(url)
if url.host:
kwargs.setdefault('host', url.host)
if url.port:
kwargs.setdefault('port', url.port)
if url.scheme == 'https':
kwargs.setdefault('connection_class', urllib3.HTTPSConnectionPool)
return cls(**kwargs)
|
[
"def",
"from_url",
"(",
"cls",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"urllib3",
".",
"util",
".",
"parse_url",
"(",
"url",
")",
"if",
"url",
".",
"host",
":",
"kwargs",
".",
"setdefault",
"(",
"'host'",
",",
"url",
".",
"host",
")",
"if",
"url",
".",
"port",
":",
"kwargs",
".",
"setdefault",
"(",
"'port'",
",",
"url",
".",
"port",
")",
"if",
"url",
".",
"scheme",
"==",
"'https'",
":",
"kwargs",
".",
"setdefault",
"(",
"'connection_class'",
",",
"urllib3",
".",
"HTTPSConnectionPool",
")",
"return",
"cls",
"(",
"*",
"*",
"kwargs",
")"
] |
Create a client from a url.
|
[
"Create",
"a",
"client",
"from",
"a",
"url",
"."
] |
python
|
train
|
tensorflow/tensorboard
|
tensorboard/plugins/pr_curve/summary.py
|
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/pr_curve/summary.py#L347-L426
|
def raw_data_op(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[Graph Keys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with tf.name_scope(name, values=[
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
]):
return _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds,
display_name,
description,
collections)
|
[
"def",
"raw_data_op",
"(",
"name",
",",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"collections",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"]",
")",
":",
"return",
"_create_tensor_summary",
"(",
"name",
",",
"true_positive_counts",
",",
"false_positive_counts",
",",
"true_negative_counts",
",",
"false_negative_counts",
",",
"precision",
",",
"recall",
",",
"num_thresholds",
",",
"display_name",
",",
"description",
",",
"collections",
")"
] |
Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
|
[
"Create",
"an",
"op",
"that",
"collects",
"data",
"for",
"visualizing",
"PR",
"curves",
"."
] |
python
|
train
|
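A minimal usage sketch for the op above, assuming a TF 1.x-style graph/session and that the module is importable as tensorboard.plugins.pr_curve.summary (the path shown in this row); the three-threshold counts and metrics are made up for illustration.

import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

tf.disable_eager_execution()

# Hypothetical precomputed counts/metrics for thresholds 0.0, 0.5, 1.0.
op = pr_summary.raw_data_op(
    name='pr_curve_raw',
    true_positive_counts=[10.0, 7.0, 0.0],
    false_positive_counts=[10.0, 2.0, 0.0],
    true_negative_counts=[0.0, 8.0, 10.0],
    false_negative_counts=[0.0, 3.0, 10.0],
    precision=[0.5, 0.778, 1.0],   # last value set to 1.0 by the usual convention when nothing is predicted positive
    recall=[1.0, 0.7, 0.0],
    num_thresholds=3)

with tf.Session() as sess:
    summary_proto = sess.run(op)  # serialized Summary, ready for a FileWriter
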
etingof/pysnmp
|
pysnmp/smi/mibs/SNMPv2-SMI.py
|
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-SMI.py#L478-L483
|
def getNode(self, name, **context):
"""Return tree node found by name"""
if name == self.name:
return self
else:
return self.getBranch(name, **context).getNode(name, **context)
|
[
"def",
"getNode",
"(",
"self",
",",
"name",
",",
"*",
"*",
"context",
")",
":",
"if",
"name",
"==",
"self",
".",
"name",
":",
"return",
"self",
"else",
":",
"return",
"self",
".",
"getBranch",
"(",
"name",
",",
"*",
"*",
"context",
")",
".",
"getNode",
"(",
"name",
",",
"*",
"*",
"context",
")"
] |
Return tree node found by name
|
[
"Return",
"tree",
"node",
"found",
"by",
"name"
] |
python
|
train
|
miguelgrinberg/python-engineio
|
engineio/server.py
|
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/server.py#L584-L589
|
def _gzip(self, response):
"""Apply gzip compression to a response."""
bytesio = six.BytesIO()
with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
gz.write(response)
return bytesio.getvalue()
|
[
"def",
"_gzip",
"(",
"self",
",",
"response",
")",
":",
"bytesio",
"=",
"six",
".",
"BytesIO",
"(",
")",
"with",
"gzip",
".",
"GzipFile",
"(",
"fileobj",
"=",
"bytesio",
",",
"mode",
"=",
"'w'",
")",
"as",
"gz",
":",
"gz",
".",
"write",
"(",
"response",
")",
"return",
"bytesio",
".",
"getvalue",
"(",
")"
] |
Apply gzip compression to a response.
|
[
"Apply",
"gzip",
"compression",
"to",
"a",
"response",
"."
] |
python
|
train
|
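The same in-memory gzip pattern can be reproduced with only the standard library; using io.BytesIO here instead of six is my substitution, not what the package does.

import gzip
import io

def gzip_bytes(payload: bytes) -> bytes:
    """Compress a byte string with gzip entirely in memory."""
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='w') as gz:
        gz.write(payload)
    return buf.getvalue()

compressed = gzip_bytes(b'{"ok": true}')
assert gzip.decompress(compressed) == b'{"ok": true}'
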
sangoma/pysensu
|
pysensu/api.py
|
https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L126-L131
|
def get_event(self, client, check):
"""
Returns an event for a given client & check name.
"""
data = self._request('GET', '/events/{}/{}'.format(client, check))
return data.json()
|
[
"def",
"get_event",
"(",
"self",
",",
"client",
",",
"check",
")",
":",
"data",
"=",
"self",
".",
"_request",
"(",
"'GET'",
",",
"'/events/{}/{}'",
".",
"format",
"(",
"client",
",",
"check",
")",
")",
"return",
"data",
".",
"json",
"(",
")"
] |
Returns an event for a given client & check name.
|
[
"Returns",
"an",
"event",
"for",
"a",
"given",
"client",
"&",
"check",
"name",
"."
] |
python
|
train
|
bitesofcode/projexui
|
projexui/widgets/xorbcolumnedit/xorbcolumnedit.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbcolumnedit/xorbcolumnedit.py#L120-L144
|
def rebuild( self ):
"""
Clears out all the child widgets from this widget and creates the
widget that best matches the column properties for this edit.
"""
plugins.init()
self.blockSignals(True)
self.setUpdatesEnabled(False)
# clear the old editor
if ( self._editor ):
self._editor.close()
self._editor.setParent(None)
self._editor.deleteLater()
self._editor = None
# create a new widget
plugin_class = plugins.widgets.get(self._columnType)
if ( plugin_class ):
self._editor = plugin_class(self)
self.layout().addWidget(self._editor)
self.blockSignals(False)
self.setUpdatesEnabled(True)
|
[
"def",
"rebuild",
"(",
"self",
")",
":",
"plugins",
".",
"init",
"(",
")",
"self",
".",
"blockSignals",
"(",
"True",
")",
"self",
".",
"setUpdatesEnabled",
"(",
"False",
")",
"# clear the old editor\r",
"if",
"(",
"self",
".",
"_editor",
")",
":",
"self",
".",
"_editor",
".",
"close",
"(",
")",
"self",
".",
"_editor",
".",
"setParent",
"(",
"None",
")",
"self",
".",
"_editor",
".",
"deleteLater",
"(",
")",
"self",
".",
"_editor",
"=",
"None",
"# create a new widget\r",
"plugin_class",
"=",
"plugins",
".",
"widgets",
".",
"get",
"(",
"self",
".",
"_columnType",
")",
"if",
"(",
"plugin_class",
")",
":",
"self",
".",
"_editor",
"=",
"plugin_class",
"(",
"self",
")",
"self",
".",
"layout",
"(",
")",
".",
"addWidget",
"(",
"self",
".",
"_editor",
")",
"self",
".",
"blockSignals",
"(",
"False",
")",
"self",
".",
"setUpdatesEnabled",
"(",
"True",
")"
] |
Clears out all the child widgets from this widget and creates the
widget that best matches the column properties for this edit.
|
[
"Clears",
"out",
"all",
"the",
"child",
"widgets",
"from",
"this",
"widget",
"and",
"creates",
"the",
"widget",
"that",
"best",
"matches",
"the",
"column",
"properties",
"for",
"this",
"edit",
"."
] |
python
|
train
|
CityOfZion/neo-python-rpc
|
neorpc/Client.py
|
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L220-L231
|
def invoke_contract(self, contract_hash, params, id=None, endpoint=None):
"""
Invokes a contract
Args:
contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'
params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}]
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(INVOKE, params=[contract_hash, params], id=id, endpoint=endpoint)
|
[
"def",
"invoke_contract",
"(",
"self",
",",
"contract_hash",
",",
"params",
",",
"id",
"=",
"None",
",",
"endpoint",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call_endpoint",
"(",
"INVOKE",
",",
"params",
"=",
"[",
"contract_hash",
",",
"params",
"]",
",",
"id",
"=",
"id",
",",
"endpoint",
"=",
"endpoint",
")"
] |
Invokes a contract
Args:
contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654'
params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}]
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
|
[
"Invokes",
"a",
"contract",
"Args",
":",
"contract_hash",
":",
"(",
"str",
")",
"hash",
"of",
"the",
"contract",
"for",
"example",
"d7678dd97c000be3f33e9362e673101bac4ca654",
"params",
":",
"(",
"list",
")",
"a",
"list",
"of",
"json",
"ContractParameters",
"to",
"pass",
"along",
"with",
"the",
"invocation",
"example",
"[",
"{",
"type",
":",
"7",
"value",
":",
"symbol",
"}",
"{",
"type",
":",
"16",
"value",
":",
"[]",
"}",
"]",
"id",
":",
"(",
"int",
"optional",
")",
"id",
"to",
"use",
"for",
"response",
"tracking",
"endpoint",
":",
"(",
"RPCEndpoint",
"optional",
")",
"endpoint",
"to",
"specify",
"to",
"use",
"Returns",
":",
"json",
"object",
"of",
"the",
"result",
"or",
"the",
"error",
"encountered",
"in",
"the",
"RPC",
"call"
] |
python
|
train
|
RiotGames/cloud-inquisitor
|
plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py
|
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py#L503-L559
|
def create_s3_bucket(cls, bucket_name, bucket_region, bucket_account, template):
"""Creates the S3 bucket on the account specified as the destination account for log files
Args:
bucket_name (`str`): Name of the S3 bucket
bucket_region (`str`): AWS Region for the bucket
bucket_account (:obj:`Account`): Account to create the S3 bucket in
template (:obj:`Template`): Jinja2 Template object for the bucket policy
Returns:
`None`
"""
s3 = get_aws_session(bucket_account).client('s3', region_name=bucket_region)
# Check to see if the bucket already exists and if we have access to it
try:
s3.head_bucket(Bucket=bucket_name)
except ClientError as ex:
status_code = ex.response['ResponseMetadata']['HTTPStatusCode']
# Bucket exists and we do not have access
if status_code == 403:
raise Exception('Bucket {} already exists but we do not have access to it and so cannot continue'.format(
bucket_name
))
# Bucket does not exist, lets create one
elif status_code == 404:
try:
s3.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': bucket_region
}
)
auditlog(
event='cloudtrail.create_s3_bucket',
actor=cls.ns,
data={
'account': bucket_account.account_name,
'bucket_region': bucket_region,
'bucket_name': bucket_name
}
)
except Exception:
                raise Exception('An error occurred while trying to create the bucket, cannot continue')
try:
bucket_acl = template.render(
bucket_name=bucket_name,
account_id=bucket_account.account_number
)
s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_acl)
except Exception as ex:
raise Warning('An error occurred while setting bucket policy: {}'.format(ex))
|
[
"def",
"create_s3_bucket",
"(",
"cls",
",",
"bucket_name",
",",
"bucket_region",
",",
"bucket_account",
",",
"template",
")",
":",
"s3",
"=",
"get_aws_session",
"(",
"bucket_account",
")",
".",
"client",
"(",
"'s3'",
",",
"region_name",
"=",
"bucket_region",
")",
"# Check to see if the bucket already exists and if we have access to it",
"try",
":",
"s3",
".",
"head_bucket",
"(",
"Bucket",
"=",
"bucket_name",
")",
"except",
"ClientError",
"as",
"ex",
":",
"status_code",
"=",
"ex",
".",
"response",
"[",
"'ResponseMetadata'",
"]",
"[",
"'HTTPStatusCode'",
"]",
"# Bucket exists and we do not have access",
"if",
"status_code",
"==",
"403",
":",
"raise",
"Exception",
"(",
"'Bucket {} already exists but we do not have access to it and so cannot continue'",
".",
"format",
"(",
"bucket_name",
")",
")",
"# Bucket does not exist, lets create one",
"elif",
"status_code",
"==",
"404",
":",
"try",
":",
"s3",
".",
"create_bucket",
"(",
"Bucket",
"=",
"bucket_name",
",",
"CreateBucketConfiguration",
"=",
"{",
"'LocationConstraint'",
":",
"bucket_region",
"}",
")",
"auditlog",
"(",
"event",
"=",
"'cloudtrail.create_s3_bucket'",
",",
"actor",
"=",
"cls",
".",
"ns",
",",
"data",
"=",
"{",
"'account'",
":",
"bucket_account",
".",
"account_name",
",",
"'bucket_region'",
":",
"bucket_region",
",",
"'bucket_name'",
":",
"bucket_name",
"}",
")",
"except",
"Exception",
":",
"raise",
"Exception",
"(",
"'An error occured while trying to create the bucket, cannot continue'",
")",
"try",
":",
"bucket_acl",
"=",
"template",
".",
"render",
"(",
"bucket_name",
"=",
"bucket_name",
",",
"account_id",
"=",
"bucket_account",
".",
"account_number",
")",
"s3",
".",
"put_bucket_policy",
"(",
"Bucket",
"=",
"bucket_name",
",",
"Policy",
"=",
"bucket_acl",
")",
"except",
"Exception",
"as",
"ex",
":",
"raise",
"Warning",
"(",
"'An error occurred while setting bucket policy: {}'",
".",
"format",
"(",
"ex",
")",
")"
] |
Creates the S3 bucket on the account specified as the destination account for log files
Args:
bucket_name (`str`): Name of the S3 bucket
bucket_region (`str`): AWS Region for the bucket
bucket_account (:obj:`Account`): Account to create the S3 bucket in
template (:obj:`Template`): Jinja2 Template object for the bucket policy
Returns:
`None`
|
[
"Creates",
"the",
"S3",
"bucket",
"on",
"the",
"account",
"specified",
"as",
"the",
"destination",
"account",
"for",
"log",
"files"
] |
python
|
train
|
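The head_bucket/create_bucket existence check above can be exercised on its own with boto3; the bucket name and region below are placeholders, and the function is a simplified sketch rather than the auditor's actual code.

import boto3
from botocore.exceptions import ClientError

def ensure_bucket(name, region):
    """Return True if the bucket exists (or was created); raise if it is inaccessible."""
    s3 = boto3.client('s3', region_name=region)
    try:
        s3.head_bucket(Bucket=name)
        return True                      # bucket exists and we can reach it
    except ClientError as ex:
        status = ex.response['ResponseMetadata']['HTTPStatusCode']
        if status == 403:
            raise Exception('Bucket {} exists but is not accessible'.format(name))
        if status == 404:
            s3.create_bucket(
                Bucket=name,
                CreateBucketConfiguration={'LocationConstraint': region})
            return True
        raise

# ensure_bucket('example-log-bucket', 'us-west-2')  # requires AWS credentials
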
django-py/django-doberman
|
doberman/contrib/captcha/fields.py
|
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/contrib/captcha/fields.py#L59-L84
|
def _captcha_form(self):
"""
captcha form
:return:
"""
try:
last_attempt = FailedAccessAttempt.objects.get(
ip_address=self._ip,
is_locked=True,
captcha_enabled=True,
is_expired=False
)
except FailedAccessAttempt.DoesNotExist:
last_attempt = None
self.required = False
self.widget = HiddenInput()
if last_attempt:
self._last_attempt = last_attempt
if last_attempt.is_locked:
self.required = True
self.widget = ReCaptcha(
public_key=self.public_key, use_ssl=self.use_ssl, attrs=self.attrs
)
|
[
"def",
"_captcha_form",
"(",
"self",
")",
":",
"try",
":",
"last_attempt",
"=",
"FailedAccessAttempt",
".",
"objects",
".",
"get",
"(",
"ip_address",
"=",
"self",
".",
"_ip",
",",
"is_locked",
"=",
"True",
",",
"captcha_enabled",
"=",
"True",
",",
"is_expired",
"=",
"False",
")",
"except",
"FailedAccessAttempt",
".",
"DoesNotExist",
":",
"last_attempt",
"=",
"None",
"self",
".",
"required",
"=",
"False",
"self",
".",
"widget",
"=",
"HiddenInput",
"(",
")",
"if",
"last_attempt",
":",
"self",
".",
"_last_attempt",
"=",
"last_attempt",
"if",
"last_attempt",
".",
"is_locked",
":",
"self",
".",
"required",
"=",
"True",
"self",
".",
"widget",
"=",
"ReCaptcha",
"(",
"public_key",
"=",
"self",
".",
"public_key",
",",
"use_ssl",
"=",
"self",
".",
"use_ssl",
",",
"attrs",
"=",
"self",
".",
"attrs",
")"
] |
captcha form
:return:
|
[
"captcha",
"form",
":",
"return",
":"
] |
python
|
train
|
codelv/enaml-native
|
src/enamlnative/android/android_radio_group.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_radio_group.py#L89-L101
|
def set_checked(self, checked):
""" Properly check the correct radio button.
"""
if not checked:
self.widget.clearCheck()
else:
#: Checked is a reference to the radio declaration
#: so we need to get the ID of it
rb = checked.proxy.widget
if not rb:
return
self.widget.check(rb.getId())
|
[
"def",
"set_checked",
"(",
"self",
",",
"checked",
")",
":",
"if",
"not",
"checked",
":",
"self",
".",
"widget",
".",
"clearCheck",
"(",
")",
"else",
":",
"#: Checked is a reference to the radio declaration",
"#: so we need to get the ID of it",
"rb",
"=",
"checked",
".",
"proxy",
".",
"widget",
"if",
"not",
"rb",
":",
"return",
"self",
".",
"widget",
".",
"check",
"(",
"rb",
".",
"getId",
"(",
")",
")"
] |
Properly check the correct radio button.
|
[
"Properly",
"check",
"the",
"correct",
"radio",
"button",
"."
] |
python
|
train
|
tanghaibao/jcvi
|
jcvi/assembly/syntenypath.py
|
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/syntenypath.py#L128-L184
|
def bed(args):
"""
%prog bed anchorsfile
Convert ANCHORS file to BED format.
"""
from collections import defaultdict
from jcvi.compara.synteny import AnchorFile, check_beds
from jcvi.formats.bed import Bed
from jcvi.formats.base import get_number
p = OptionParser(bed.__doc__)
p.add_option("--switch", default=False, action="store_true",
help="Switch reference and aligned map elements")
p.add_option("--scale", type="float",
help="Scale the aligned map distance by factor")
p.set_beds()
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
anchorsfile, = args
switch = opts.switch
scale = opts.scale
ac = AnchorFile(anchorsfile)
pairs = defaultdict(list)
for a, b, block_id in ac.iter_pairs():
pairs[a].append(b)
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)
bd = Bed()
for q in qbed:
qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn
if qaccn not in pairs:
continue
for s in pairs[qaccn]:
si, s = sorder[s]
sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn
if switch:
qseqid, sseqid = sseqid, qseqid
qstart, sstart = sstart, qstart
qend, send = send, qend
qaccn, saccn = saccn, qaccn
if scale:
sstart /= scale
try:
newsseqid = get_number(sseqid)
except ValueError:
raise ValueError("`{0}` is on `{1}` with no number to extract".\
format(saccn, sseqid))
bedline = "\t".join(str(x) for x in (qseqid, qstart - 1, qend,
"{0}:{1}".format(newsseqid, sstart)))
bd.add(bedline)
bd.print_to_file(filename=opts.outfile, sorted=True)
|
[
"def",
"bed",
"(",
"args",
")",
":",
"from",
"collections",
"import",
"defaultdict",
"from",
"jcvi",
".",
"compara",
".",
"synteny",
"import",
"AnchorFile",
",",
"check_beds",
"from",
"jcvi",
".",
"formats",
".",
"bed",
"import",
"Bed",
"from",
"jcvi",
".",
"formats",
".",
"base",
"import",
"get_number",
"p",
"=",
"OptionParser",
"(",
"bed",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--switch\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Switch reference and aligned map elements\"",
")",
"p",
".",
"add_option",
"(",
"\"--scale\"",
",",
"type",
"=",
"\"float\"",
",",
"help",
"=",
"\"Scale the aligned map distance by factor\"",
")",
"p",
".",
"set_beds",
"(",
")",
"p",
".",
"set_outfile",
"(",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"anchorsfile",
",",
"=",
"args",
"switch",
"=",
"opts",
".",
"switch",
"scale",
"=",
"opts",
".",
"scale",
"ac",
"=",
"AnchorFile",
"(",
"anchorsfile",
")",
"pairs",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"a",
",",
"b",
",",
"block_id",
"in",
"ac",
".",
"iter_pairs",
"(",
")",
":",
"pairs",
"[",
"a",
"]",
".",
"append",
"(",
"b",
")",
"qbed",
",",
"sbed",
",",
"qorder",
",",
"sorder",
",",
"is_self",
"=",
"check_beds",
"(",
"anchorsfile",
",",
"p",
",",
"opts",
")",
"bd",
"=",
"Bed",
"(",
")",
"for",
"q",
"in",
"qbed",
":",
"qseqid",
",",
"qstart",
",",
"qend",
",",
"qaccn",
"=",
"q",
".",
"seqid",
",",
"q",
".",
"start",
",",
"q",
".",
"end",
",",
"q",
".",
"accn",
"if",
"qaccn",
"not",
"in",
"pairs",
":",
"continue",
"for",
"s",
"in",
"pairs",
"[",
"qaccn",
"]",
":",
"si",
",",
"s",
"=",
"sorder",
"[",
"s",
"]",
"sseqid",
",",
"sstart",
",",
"send",
",",
"saccn",
"=",
"s",
".",
"seqid",
",",
"s",
".",
"start",
",",
"s",
".",
"end",
",",
"s",
".",
"accn",
"if",
"switch",
":",
"qseqid",
",",
"sseqid",
"=",
"sseqid",
",",
"qseqid",
"qstart",
",",
"sstart",
"=",
"sstart",
",",
"qstart",
"qend",
",",
"send",
"=",
"send",
",",
"qend",
"qaccn",
",",
"saccn",
"=",
"saccn",
",",
"qaccn",
"if",
"scale",
":",
"sstart",
"/=",
"scale",
"try",
":",
"newsseqid",
"=",
"get_number",
"(",
"sseqid",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"`{0}` is on `{1}` with no number to extract\"",
".",
"format",
"(",
"saccn",
",",
"sseqid",
")",
")",
"bedline",
"=",
"\"\\t\"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"qseqid",
",",
"qstart",
"-",
"1",
",",
"qend",
",",
"\"{0}:{1}\"",
".",
"format",
"(",
"newsseqid",
",",
"sstart",
")",
")",
")",
"bd",
".",
"add",
"(",
"bedline",
")",
"bd",
".",
"print_to_file",
"(",
"filename",
"=",
"opts",
".",
"outfile",
",",
"sorted",
"=",
"True",
")"
] |
%prog bed anchorsfile
Convert ANCHORS file to BED format.
|
[
"%prog",
"bed",
"anchorsfile"
] |
python
|
train
|
loli/medpy
|
medpy/graphcut/graph.py
|
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/graph.py#L299-L327
|
def set_source_nodes(self, source_nodes):
r"""
Set multiple source nodes and compute their t-weights.
Parameters
----------
source_nodes : sequence of integers
Declare the source nodes via their ids.
Raises
------
ValueError
If a passed node id does not refer to any node of the graph
(i.e. it is either higher than the initially set number of
nodes or lower than zero).
Notes
-----
It does not get checked if one of the supplied source-nodes already has
a weight assigned (e.g. by passing it to `set_sink_nodes`). This can
occur when the foreground- and background-markers cover the same region. In this
case the order of setting the terminal nodes can affect the graph and therefore
the graph-cut result.
"""
if max(source_nodes) >= self.__nodes or min(source_nodes) < 0:
raise ValueError('Invalid node id of {} or {}. Valid values are 0 to {}.'.format(max(source_nodes), min(source_nodes), self.__nodes - 1))
# set the source-to-node weights (t-weights)
for snode in source_nodes:
self.__graph.add_tweights(int(snode), self.MAX, 0)
|
[
"def",
"set_source_nodes",
"(",
"self",
",",
"source_nodes",
")",
":",
"if",
"max",
"(",
"source_nodes",
")",
">=",
"self",
".",
"__nodes",
"or",
"min",
"(",
"source_nodes",
")",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Invalid node id of {} or {}. Valid values are 0 to {}.'",
".",
"format",
"(",
"max",
"(",
"source_nodes",
")",
",",
"min",
"(",
"source_nodes",
")",
",",
"self",
".",
"__nodes",
"-",
"1",
")",
")",
"# set the source-to-node weights (t-weights)",
"for",
"snode",
"in",
"source_nodes",
":",
"self",
".",
"__graph",
".",
"add_tweights",
"(",
"int",
"(",
"snode",
")",
",",
"self",
".",
"MAX",
",",
"0",
")"
] |
r"""
Set multiple source nodes and compute their t-weights.
Parameters
----------
source_nodes : sequence of integers
Declare the source nodes via their ids.
Raises
------
ValueError
If a passed node id does not refer to any node of the graph
(i.e. it is either higher than the initially set number of
nodes or lower than zero).
Notes
-----
It does not get checked if one of the supplied source-nodes already has
a weight assigned (e.g. by passing it to `set_sink_nodes`). This can
occur when the foreground- and background-markers cover the same region. In this
case the order of setting the terminal nodes can affect the graph and therefore
the graph-cut result.
|
[
"r",
"Set",
"multiple",
"source",
"nodes",
"and",
"compute",
"their",
"t",
"-",
"weights",
".",
"Parameters",
"----------",
"source_nodes",
":",
"sequence",
"of",
"integers",
"Declare",
"the",
"source",
"nodes",
"via",
"their",
"ids",
".",
"Raises",
"------",
"ValueError",
"If",
"a",
"passed",
"node",
"id",
"does",
"not",
"refer",
"to",
"any",
"node",
"of",
"the",
"graph",
"(",
"i",
".",
"e",
".",
"it",
"is",
"either",
"higher",
"than",
"the",
"initially",
"set",
"number",
"of",
"nodes",
"or",
"lower",
"than",
"zero",
")",
".",
"Notes",
"-----",
"It",
"does",
"not",
"get",
"checked",
"if",
"one",
"of",
"the",
"supplied",
"source",
"-",
"nodes",
"already",
"has",
"a",
"weight",
"assigned",
"(",
"e",
".",
"g",
".",
"by",
"passing",
"it",
"to",
"set_sink_nodes",
")",
".",
"This",
"can",
"occur",
"when",
"the",
"foreground",
"-",
"and",
"background",
"-",
"markers",
"cover",
"the",
"same",
"region",
".",
"In",
"this",
"case",
"the",
"order",
"of",
"setting",
"the",
"terminal",
"nodes",
"can",
"affect",
"the",
"graph",
"and",
"therefore",
"the",
"graph",
"-",
"cut",
"result",
"."
] |
python
|
train
|
nanoporetech/ont_fast5_api
|
ont_fast5_api/fast5_file.py
|
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L125-L131
|
def get_tracking_id(self):
""" Returns a dictionary of tracking-id key/value pairs.
"""
self.assert_open()
tracking = self.handle[self.global_key +'tracking_id'].attrs.items()
tracking = {key: _clean(value) for key, value in tracking}
return tracking
|
[
"def",
"get_tracking_id",
"(",
"self",
")",
":",
"self",
".",
"assert_open",
"(",
")",
"tracking",
"=",
"self",
".",
"handle",
"[",
"self",
".",
"global_key",
"+",
"'tracking_id'",
"]",
".",
"attrs",
".",
"items",
"(",
")",
"tracking",
"=",
"{",
"key",
":",
"_clean",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"tracking",
"}",
"return",
"tracking"
] |
Returns a dictionary of tracking-id key/value pairs.
|
[
"Returns",
"a",
"dictionary",
"of",
"tracking",
"-",
"id",
"key",
"/",
"value",
"pairs",
"."
] |
python
|
train
|
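A standalone sketch of reading the same attributes with h5py; the 'UniqueGlobalKey/tracking_id' group path and the bytes-to-str cleaning are assumptions on my part (the row only shows self.global_key + 'tracking_id' and a _clean helper).

import h5py

def read_tracking_id(path):
    """Return the tracking_id attributes of a single-read fast5 file as a plain dict."""
    with h5py.File(path, 'r') as handle:
        attrs = handle['UniqueGlobalKey/tracking_id'].attrs
        return {key: (value.decode() if isinstance(value, bytes) else value)
                for key, value in attrs.items()}
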
psd-tools/psd-tools
|
src/psd_tools/utils.py
|
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/utils.py#L121-L135
|
def write_position(fp, position, value, fmt='I'):
"""
Writes a value to the specified position.
:param fp: file-like object
:param position: position of the value marker
:param value: value to write
:param fmt: format of the value
:return: written byte size
"""
current_position = fp.tell()
fp.seek(position)
written = write_bytes(fp, struct.pack(str('>' + fmt), value))
fp.seek(current_position)
return written
|
[
"def",
"write_position",
"(",
"fp",
",",
"position",
",",
"value",
",",
"fmt",
"=",
"'I'",
")",
":",
"current_position",
"=",
"fp",
".",
"tell",
"(",
")",
"fp",
".",
"seek",
"(",
"position",
")",
"written",
"=",
"write_bytes",
"(",
"fp",
",",
"struct",
".",
"pack",
"(",
"str",
"(",
"'>'",
"+",
"fmt",
")",
",",
"value",
")",
")",
"fp",
".",
"seek",
"(",
"current_position",
")",
"return",
"written"
] |
Writes a value to the specified position.
:param fp: file-like object
:param position: position of the value marker
:param value: value to write
:param fmt: format of the value
:return: written byte size
|
[
"Writes",
"a",
"value",
"to",
"the",
"specified",
"position",
"."
] |
python
|
train
|
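The seek, write, restore pattern is easy to check against an in-memory buffer; this sketch replaces the module's write_bytes helper with a plain fp.write, which is an assumption made for self-containment.

import io
import struct

def write_at(fp, position, value, fmt='I'):
    """Write `value` at `position` with big-endian format `fmt`, then restore the offset."""
    current = fp.tell()
    fp.seek(position)
    written = fp.write(struct.pack('>' + fmt, value))
    fp.seek(current)
    return written

buf = io.BytesIO(b'\x00' * 8)
buf.seek(8)                      # pretend we are appending at the end of the stream
write_at(buf, 0, 1234)           # back-patch a length marker at offset 0
assert struct.unpack('>I', buf.getvalue()[:4])[0] == 1234
assert buf.tell() == 8           # original position was restored
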
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L50-L62
|
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
|
[
"def",
"get_html_values",
"(",
"self",
",",
"pydict",
",",
"recovery_name",
"=",
"True",
")",
":",
"new_dict",
"=",
"{",
"\"id\"",
":",
"pydict",
"[",
"\"id\"",
"]",
"}",
"for",
"field",
"in",
"self",
":",
"if",
"field",
".",
"key",
"in",
"pydict",
":",
"if",
"recovery_name",
":",
"new_dict",
"[",
"field",
".",
"name",
"]",
"=",
"pydict",
"[",
"field",
".",
"key",
"]",
"else",
":",
"new_dict",
"[",
"field",
".",
"key",
"]",
"=",
"pydict",
"[",
"field",
".",
"key",
"]",
"return",
"new_dict"
] |
Convert naive get response data to human readable field name format.
using html data format.
|
[
"Convert",
"naive",
"get",
"response",
"data",
"to",
"human",
"readable",
"field",
"name",
"format",
".",
"using",
"html",
"data",
"format",
"."
] |
python
|
train
|
MacHu-GWU/angora-project
|
angora/zzz_manual_install.py
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/zzz_manual_install.py#L145-L160
|
def check_need_install():
"""Check if installed package are exactly the same to this one.
"""
md5_root, md5_dst = list(), list()
need_install_flag = False
for root, _, basename_list in os.walk(_ROOT):
if os.path.basename(root) != "__pycache__":
for basename in basename_list:
src = os.path.join(root, basename)
dst = os.path.join(root.replace(_ROOT, _DST), basename)
if os.path.exists(dst):
if md5_of_file(src) != md5_of_file(dst):
return True
else:
return True
return need_install_flag
|
[
"def",
"check_need_install",
"(",
")",
":",
"md5_root",
",",
"md5_dst",
"=",
"list",
"(",
")",
",",
"list",
"(",
")",
"need_install_flag",
"=",
"False",
"for",
"root",
",",
"_",
",",
"basename_list",
"in",
"os",
".",
"walk",
"(",
"_ROOT",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"!=",
"\"__pycache__\"",
":",
"for",
"basename",
"in",
"basename_list",
":",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"basename",
")",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
".",
"replace",
"(",
"_ROOT",
",",
"_DST",
")",
",",
"basename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dst",
")",
":",
"if",
"md5_of_file",
"(",
"src",
")",
"!=",
"md5_of_file",
"(",
"dst",
")",
":",
"return",
"True",
"else",
":",
"return",
"True",
"return",
"need_install_flag"
] |
Check if the installed package is exactly the same as this one.
|
[
"Check",
"if",
"installed",
"package",
"are",
"exactly",
"the",
"same",
"to",
"this",
"one",
"."
] |
python
|
train
|
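md5_of_file is called but not defined in this row; a plausible helper built on hashlib might look like the sketch below (an assumption, not the package's actual implementation).

import hashlib

def md5_of_file(path, chunk_size=1 << 20):
    """Return the hex MD5 digest of a file, read in 1 MiB chunks."""
    digest = hashlib.md5()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
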
EntilZha/PyFunctional
|
functional/pipeline.py
|
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/pipeline.py#L1074-L1091
|
def fold_left(self, zero_value, func):
"""
Assuming that the sequence elements are of type A, folds from left to right starting with
the seed value given by zero_value (of type A) using a function of type
func(current: B, next: A) => B. current represents the folded value so far and next is the
next element from the sequence to fold into current.
    >>> seq('a', 'b', 'c').fold_left(['start'], lambda current, next: current + [next])
['start', 'a', 'b', 'c']
:param zero_value: zero value to reduce into
:param func: Two parameter function as described by function docs
:return: value from folding values with func into zero_value from left to right.
"""
result = zero_value
for element in self:
result = func(result, element)
return _wrap(result)
|
[
"def",
"fold_left",
"(",
"self",
",",
"zero_value",
",",
"func",
")",
":",
"result",
"=",
"zero_value",
"for",
"element",
"in",
"self",
":",
"result",
"=",
"func",
"(",
"result",
",",
"element",
")",
"return",
"_wrap",
"(",
"result",
")"
] |
Assuming that the sequence elements are of type A, folds from left to right starting with
the seed value given by zero_value (of type A) using a function of type
func(current: B, next: A) => B. current represents the folded value so far and next is the
next element from the sequence to fold into current.
>>> seq('a', 'b', 'c').fold_left(['start'], lambda current, next: current + [next])
['start', 'a', 'b', 'c']
:param zero_value: zero value to reduce into
:param func: Two parameter function as described by function docs
:return: value from folding values with func into zero_value from left to right.
|
[
"Assuming",
"that",
"the",
"sequence",
"elements",
"are",
"of",
"type",
"A",
"folds",
"from",
"left",
"to",
"right",
"starting",
"with",
"the",
"seed",
"value",
"given",
"by",
"zero_value",
"(",
"of",
"type",
"A",
")",
"using",
"a",
"function",
"of",
"type",
"func",
"(",
"current",
":",
"B",
"next",
":",
"A",
")",
"=",
">",
"B",
".",
"current",
"represents",
"the",
"folded",
"value",
"so",
"far",
"and",
"next",
"is",
"the",
"next",
"element",
"from",
"the",
"sequence",
"to",
"fold",
"into",
"current",
"."
] |
python
|
train
|
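fold_left with an explicit seed behaves like functools.reduce with an initial value; a plain-Python rendering of the docstring example:

from functools import reduce

result = reduce(lambda current, nxt: current + [nxt], ['a', 'b', 'c'], ['start'])
assert result == ['start', 'a', 'b', 'c']
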
VIVelev/PyDojoML
|
dojo/base/model.py
|
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/base/model.py#L161-L183
|
def fit_predict(self, X, y, X_):
"""Shortcut to `model.fit(X, y); return model.predict(X_)`.
Parameters:
-----------
X : matrix, shape (n_samples, n_features)
The samples, the train data.
y : vector, shape (n_samples,)
The target labels.
X_ : matrix, shape (m_samples, m_features)
The samples which labels to predict.
Returns:
--------
y : vector, shape (m_samples,)
The predicted labels.
"""
self.fit(X, y)
return self.predict(X_)
|
[
"def",
"fit_predict",
"(",
"self",
",",
"X",
",",
"y",
",",
"X_",
")",
":",
"self",
".",
"fit",
"(",
"X",
",",
"y",
")",
"return",
"self",
".",
"predict",
"(",
"X_",
")"
] |
Shortcut to `model.fit(X, y); return model.predict(X_)`.
Parameters:
-----------
X : matrix, shape (n_samples, n_features)
The samples, the train data.
y : vector, shape (n_samples,)
The target labels.
X_ : matrix, shape (m_samples, m_features)
The samples which labels to predict.
Returns:
--------
y : vector, shape (m_samples,)
The predicted labels.
|
[
"Shortcut",
"to",
"model",
".",
"fit",
"(",
"X",
"y",
")",
";",
"return",
"model",
".",
"predict",
"(",
"X_",
")",
".",
"Parameters",
":",
"-----------",
"X",
":",
"matrix",
"shape",
"(",
"n_samples",
"n_features",
")",
"The",
"samples",
"the",
"train",
"data",
"."
] |
python
|
train
|
jasonlaska/spherecluster
|
spherecluster/util.py
|
https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L16-L32
|
def sample_vMF(mu, kappa, num_samples):
"""Generate num_samples N-dimensional samples from von Mises Fisher
distribution around center mu \in R^N with concentration kappa.
"""
dim = len(mu)
result = np.zeros((num_samples, dim))
for nn in range(num_samples):
# sample offset from center (on sphere) with spread kappa
w = _sample_weight(kappa, dim)
# sample a point v on the unit sphere that's orthogonal to mu
v = _sample_orthonormal_to(mu)
# compute new point
result[nn, :] = v * np.sqrt(1. - w ** 2) + w * mu
return result
|
[
"def",
"sample_vMF",
"(",
"mu",
",",
"kappa",
",",
"num_samples",
")",
":",
"dim",
"=",
"len",
"(",
"mu",
")",
"result",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_samples",
",",
"dim",
")",
")",
"for",
"nn",
"in",
"range",
"(",
"num_samples",
")",
":",
"# sample offset from center (on sphere) with spread kappa",
"w",
"=",
"_sample_weight",
"(",
"kappa",
",",
"dim",
")",
"# sample a point v on the unit sphere that's orthogonal to mu",
"v",
"=",
"_sample_orthonormal_to",
"(",
"mu",
")",
"# compute new point",
"result",
"[",
"nn",
",",
":",
"]",
"=",
"v",
"*",
"np",
".",
"sqrt",
"(",
"1.",
"-",
"w",
"**",
"2",
")",
"+",
"w",
"*",
"mu",
"return",
"result"
] |
Generate num_samples N-dimensional samples from von Mises Fisher
distribution around center mu \in R^N with concentration kappa.
|
[
"Generate",
"num_samples",
"N",
"-",
"dimensional",
"samples",
"from",
"von",
"Mises",
"Fisher",
"distribution",
"around",
"center",
"mu",
"\\",
"in",
"R^N",
"with",
"concentration",
"kappa",
"."
] |
python
|
train
|
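One quick sanity check for the sampler is that every draw lies on the unit sphere; the import path below follows the module path in this row, and running it assumes the package and a compatible scikit-learn version are installed.

import numpy as np
from spherecluster.util import sample_vMF

mu = np.array([1.0, 0.0, 0.0])
samples = sample_vMF(mu, 50.0, 1000)   # concentration kappa=50, 1000 draws

# Each sample is v*sqrt(1 - w**2) + w*mu with v orthonormal to mu, so norms are 1.
assert np.allclose(np.linalg.norm(samples, axis=1), 1.0)
print(samples.mean(axis=0))            # roughly aligned with mu for large kappa
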
jobec/django-auth-adfs
|
django_auth_adfs/backend.py
|
https://github.com/jobec/django-auth-adfs/blob/07197be392724d16a6132b03d9eafb1d634749cf/django_auth_adfs/backend.py#L160-L206
|
def update_user_groups(self, user, claims):
"""
Updates user group memberships based on the GROUPS_CLAIM setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
"""
if settings.GROUPS_CLAIM is not None:
# Update the user's group memberships
django_groups = [group.name for group in user.groups.all()]
if settings.GROUPS_CLAIM in claims:
claim_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(claim_groups, list):
claim_groups = [claim_groups, ]
else:
logger.debug(
"The configured groups claim '{}' was not found in the access token".format(settings.GROUPS_CLAIM))
claim_groups = []
# Make a diff of the user's groups.
# Removing a user from all groups and then re-add them would cause
# the autoincrement value for the database table storing the
# user-to-group mappings to increment for no reason.
groups_to_remove = set(django_groups) - set(claim_groups)
groups_to_add = set(claim_groups) - set(django_groups)
# Loop through the groups in the group claim and
# add the user to these groups as needed.
for group_name in groups_to_remove:
group = Group.objects.get(name=group_name)
user.groups.remove(group)
logger.debug("User removed from group '{}'".format(group_name))
for group_name in groups_to_add:
try:
if settings.MIRROR_GROUPS:
group, _ = Group.objects.get_or_create(name=group_name)
logger.debug("Created group '{}'".format(group_name))
else:
group = Group.objects.get(name=group_name)
user.groups.add(group)
logger.debug("User added to group '{}'".format(group_name))
except ObjectDoesNotExist:
# Silently fail for non-existing groups.
pass
|
[
"def",
"update_user_groups",
"(",
"self",
",",
"user",
",",
"claims",
")",
":",
"if",
"settings",
".",
"GROUPS_CLAIM",
"is",
"not",
"None",
":",
"# Update the user's group memberships",
"django_groups",
"=",
"[",
"group",
".",
"name",
"for",
"group",
"in",
"user",
".",
"groups",
".",
"all",
"(",
")",
"]",
"if",
"settings",
".",
"GROUPS_CLAIM",
"in",
"claims",
":",
"claim_groups",
"=",
"claims",
"[",
"settings",
".",
"GROUPS_CLAIM",
"]",
"if",
"not",
"isinstance",
"(",
"claim_groups",
",",
"list",
")",
":",
"claim_groups",
"=",
"[",
"claim_groups",
",",
"]",
"else",
":",
"logger",
".",
"debug",
"(",
"\"The configured groups claim '{}' was not found in the access token\"",
".",
"format",
"(",
"settings",
".",
"GROUPS_CLAIM",
")",
")",
"claim_groups",
"=",
"[",
"]",
"# Make a diff of the user's groups.",
"# Removing a user from all groups and then re-add them would cause",
"# the autoincrement value for the database table storing the",
"# user-to-group mappings to increment for no reason.",
"groups_to_remove",
"=",
"set",
"(",
"django_groups",
")",
"-",
"set",
"(",
"claim_groups",
")",
"groups_to_add",
"=",
"set",
"(",
"claim_groups",
")",
"-",
"set",
"(",
"django_groups",
")",
"# Loop through the groups in the group claim and",
"# add the user to these groups as needed.",
"for",
"group_name",
"in",
"groups_to_remove",
":",
"group",
"=",
"Group",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"group_name",
")",
"user",
".",
"groups",
".",
"remove",
"(",
"group",
")",
"logger",
".",
"debug",
"(",
"\"User removed from group '{}'\"",
".",
"format",
"(",
"group_name",
")",
")",
"for",
"group_name",
"in",
"groups_to_add",
":",
"try",
":",
"if",
"settings",
".",
"MIRROR_GROUPS",
":",
"group",
",",
"_",
"=",
"Group",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"group_name",
")",
"logger",
".",
"debug",
"(",
"\"Created group '{}'\"",
".",
"format",
"(",
"group_name",
")",
")",
"else",
":",
"group",
"=",
"Group",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"group_name",
")",
"user",
".",
"groups",
".",
"add",
"(",
"group",
")",
"logger",
".",
"debug",
"(",
"\"User added to group '{}'\"",
".",
"format",
"(",
"group_name",
")",
")",
"except",
"ObjectDoesNotExist",
":",
"# Silently fail for non-existing groups.",
"pass"
] |
Updates user group memberships based on the GROUPS_CLAIM setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
|
[
"Updates",
"user",
"group",
"memberships",
"based",
"on",
"the",
"GROUPS_CLAIM",
"setting",
"."
] |
python
|
train
|
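The membership diff itself is plain set arithmetic and can be previewed outside Django; the group names below are placeholders.

django_groups = ['staff', 'legacy-role']   # groups the user currently has
claim_groups = ['staff', 'auditors']       # groups asserted by the token claim

groups_to_remove = set(django_groups) - set(claim_groups)
groups_to_add = set(claim_groups) - set(django_groups)

print(groups_to_remove)  # {'legacy-role'}
print(groups_to_add)     # {'auditors'}
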
Spinmob/spinmob
|
_data.py
|
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L2380-L2395
|
def _format_value_error(self, v, e, pm=" +/- "):
"""
Returns a string v +/- e with the right number of sig figs.
"""
# If we have weird stuff
if not _s.fun.is_a_number(v) or not _s.fun.is_a_number(e) \
or v in [_n.inf, _n.nan, _n.NAN] or e in [_n.inf, _n.nan, _n.NAN]:
return str(v)+pm+str(e)
# Normal values.
try:
sig_figs = -int(_n.floor(_n.log10(abs(e))))+1
return str(_n.round(v, sig_figs)) + pm + str(_n.round(e, sig_figs))
except:
return str(v)+pm+str(e)
|
[
"def",
"_format_value_error",
"(",
"self",
",",
"v",
",",
"e",
",",
"pm",
"=",
"\" +/- \"",
")",
":",
"# If we have weird stuff",
"if",
"not",
"_s",
".",
"fun",
".",
"is_a_number",
"(",
"v",
")",
"or",
"not",
"_s",
".",
"fun",
".",
"is_a_number",
"(",
"e",
")",
"or",
"v",
"in",
"[",
"_n",
".",
"inf",
",",
"_n",
".",
"nan",
",",
"_n",
".",
"NAN",
"]",
"or",
"e",
"in",
"[",
"_n",
".",
"inf",
",",
"_n",
".",
"nan",
",",
"_n",
".",
"NAN",
"]",
":",
"return",
"str",
"(",
"v",
")",
"+",
"pm",
"+",
"str",
"(",
"e",
")",
"# Normal values.",
"try",
":",
"sig_figs",
"=",
"-",
"int",
"(",
"_n",
".",
"floor",
"(",
"_n",
".",
"log10",
"(",
"abs",
"(",
"e",
")",
")",
")",
")",
"+",
"1",
"return",
"str",
"(",
"_n",
".",
"round",
"(",
"v",
",",
"sig_figs",
")",
")",
"+",
"pm",
"+",
"str",
"(",
"_n",
".",
"round",
"(",
"e",
",",
"sig_figs",
")",
")",
"except",
":",
"return",
"str",
"(",
"v",
")",
"+",
"pm",
"+",
"str",
"(",
"e",
")"
] |
Returns a string v +/- e with the right number of sig figs.
|
[
"Returns",
"a",
"string",
"v",
"+",
"/",
"-",
"e",
"with",
"the",
"right",
"number",
"of",
"sig",
"figs",
"."
] |
python
|
train
|
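The significant-figure rule can be isolated as a small standalone function with numpy, mirroring the rounding branch above; the NaN/inf fallbacks are omitted here for brevity.

import numpy as np

def format_value_error(v, e, pm=' +/- '):
    """Round v and its uncertainty e to one digit past the error's leading figure."""
    sig_figs = -int(np.floor(np.log10(abs(e)))) + 1
    return str(np.round(v, sig_figs)) + pm + str(np.round(e, sig_figs))

print(format_value_error(3.14159, 0.042))  # 3.142 +/- 0.042
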
StellarCN/py-stellar-base
|
stellar_base/builder.py
|
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/builder.py#L474-L491
|
def append_manage_data_op(self, data_name, data_value, source=None):
"""Append a :class:`ManageData <stellar_base.operation.ManageData>`
operation to the list of operations.
:param str data_name: String up to 64 bytes long. If this is a new Name
it will add the given name/value pair to the account. If this Name
is already present then the associated value will be modified.
:param data_value: If not present then the existing
Name will be deleted. If present then this value will be set in the
DataEntry. Up to 64 bytes long.
:type data_value: str, bytes, None
    :param str source: The source account on which data is being managed.
:return: This builder instance.
"""
op = operation.ManageData(data_name, data_value, source)
return self.append_op(op)
|
[
"def",
"append_manage_data_op",
"(",
"self",
",",
"data_name",
",",
"data_value",
",",
"source",
"=",
"None",
")",
":",
"op",
"=",
"operation",
".",
"ManageData",
"(",
"data_name",
",",
"data_value",
",",
"source",
")",
"return",
"self",
".",
"append_op",
"(",
"op",
")"
] |
Append a :class:`ManageData <stellar_base.operation.ManageData>`
operation to the list of operations.
:param str data_name: String up to 64 bytes long. If this is a new Name
it will add the given name/value pair to the account. If this Name
is already present then the associated value will be modified.
:param data_value: If not present then the existing
Name will be deleted. If present then this value will be set in the
DataEntry. Up to 64 bytes long.
:type data_value: str, bytes, None
:param str source: The source account on which data is being managed.
:return: This builder instance.
|
[
"Append",
"a",
":",
"class",
":",
"ManageData",
"<stellar_base",
".",
"operation",
".",
"ManageData",
">",
"operation",
"to",
"the",
"list",
"of",
"operations",
"."
] |
python
|
train
|
absent1706/sqlalchemy-mixins
|
sqlalchemy_mixins/eagerload.py
|
https://github.com/absent1706/sqlalchemy-mixins/blob/a111e69fc5edc5d81a31dca45755f21c8c512ed1/sqlalchemy_mixins/eagerload.py#L100-L116
|
def with_joined(cls, *paths):
"""
Eagerload for simple cases where we need to just
joined load some relations
In strings syntax, you can split relations with dot
due to this SQLAlchemy feature: https://goo.gl/yM2DLX
:type paths: *List[str] | *List[InstrumentedAttribute]
Example 1:
Comment.with_joined('user', 'post', 'post.comments').first()
Example 2:
Comment.with_joined(Comment.user, Comment.post).first()
"""
options = [joinedload(path) for path in paths]
return cls.query.options(*options)
|
[
"def",
"with_joined",
"(",
"cls",
",",
"*",
"paths",
")",
":",
"options",
"=",
"[",
"joinedload",
"(",
"path",
")",
"for",
"path",
"in",
"paths",
"]",
"return",
"cls",
".",
"query",
".",
"options",
"(",
"*",
"options",
")"
] |
Eagerload for simple cases where we need to just
joined load some relations
In strings syntax, you can split relations with dot
due to this SQLAlchemy feature: https://goo.gl/yM2DLX
:type paths: *List[str] | *List[InstrumentedAttribute]
Example 1:
Comment.with_joined('user', 'post', 'post.comments').first()
Example 2:
Comment.with_joined(Comment.user, Comment.post).first()
|
[
"Eagerload",
"for",
"simple",
"cases",
"where",
"we",
"need",
"to",
"just",
"joined",
"load",
"some",
"relations",
"In",
"strings",
"syntax",
"you",
"can",
"split",
"relations",
"with",
"dot",
"due",
"to",
"this",
"SQLAlchemy",
"feature",
":",
"https",
":",
"//",
"goo",
".",
"gl",
"/",
"yM2DLX",
":",
"type",
"paths",
":",
"*",
"List",
"[",
"str",
"]",
"|",
"*",
"List",
"[",
"InstrumentedAttribute",
"]",
"Example",
"1",
":",
"Comment",
".",
"with_joined",
"(",
"user",
"post",
"post",
".",
"comments",
")",
".",
"first",
"()",
"Example",
"2",
":",
"Comment",
".",
"with_joined",
"(",
"Comment",
".",
"user",
"Comment",
".",
"post",
")",
".",
"first",
"()"
] |
python
|
train
|
datastax/python-driver
|
cassandra/cluster.py
|
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L3366-L3378
|
def _address_from_row(self, row):
"""
Parse the broadcast rpc address from a row and return it untranslated.
"""
addr = None
if "rpc_address" in row:
addr = row.get("rpc_address") # peers and local
if "native_transport_address" in row:
addr = row.get("native_transport_address")
if not addr or addr in ["0.0.0.0", "::"]:
addr = row.get("peer")
return addr
|
[
"def",
"_address_from_row",
"(",
"self",
",",
"row",
")",
":",
"addr",
"=",
"None",
"if",
"\"rpc_address\"",
"in",
"row",
":",
"addr",
"=",
"row",
".",
"get",
"(",
"\"rpc_address\"",
")",
"# peers and local",
"if",
"\"native_transport_address\"",
"in",
"row",
":",
"addr",
"=",
"row",
".",
"get",
"(",
"\"native_transport_address\"",
")",
"if",
"not",
"addr",
"or",
"addr",
"in",
"[",
"\"0.0.0.0\"",
",",
"\"::\"",
"]",
":",
"addr",
"=",
"row",
".",
"get",
"(",
"\"peer\"",
")",
"return",
"addr"
] |
Parse the broadcast rpc address from a row and return it untranslated.
|
[
"Parse",
"the",
"broadcast",
"rpc",
"address",
"from",
"a",
"row",
"and",
"return",
"it",
"untranslated",
"."
] |
python
|
train
|
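The precedence (native_transport_address over rpc_address, falling back to peer for unbound addresses) can be traced with a plain dict standing in for a system-table row.

def address_from_row(row):
    """Standalone copy of the precedence logic above, for illustration only."""
    addr = None
    if 'rpc_address' in row:
        addr = row.get('rpc_address')                # peers and local
    if 'native_transport_address' in row:
        addr = row.get('native_transport_address')   # newer system tables win
    if not addr or addr in ['0.0.0.0', '::']:
        addr = row.get('peer')                       # unbound address: use the peer column
    return addr

assert address_from_row({'rpc_address': '10.0.0.5'}) == '10.0.0.5'
assert address_from_row({'rpc_address': '0.0.0.0', 'peer': '10.0.0.9'}) == '10.0.0.9'
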
openstax/cnx-publishing
|
cnxpublishing/publish.py
|
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/publish.py#L529-L548
|
def get_previous_publication(cursor, ident_hash):
"""Get the previous publication of the given
publication as an ident-hash.
"""
cursor.execute("""\
WITH contextual_module AS (
SELECT uuid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s)
SELECT ident_hash(m.uuid, m.major_version, m.minor_version)
FROM modules AS m JOIN contextual_module AS context ON (m.uuid = context.uuid)
WHERE
m.module_ident < context.module_ident
ORDER BY revised DESC
LIMIT 1""", (ident_hash,))
try:
previous_ident_hash = cursor.fetchone()[0]
except TypeError: # NoneType
previous_ident_hash = None
return previous_ident_hash
|
[
"def",
"get_previous_publication",
"(",
"cursor",
",",
"ident_hash",
")",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\\\nWITH contextual_module AS (\n SELECT uuid, module_ident\n FROM modules\n WHERE ident_hash(uuid, major_version, minor_version) = %s)\nSELECT ident_hash(m.uuid, m.major_version, m.minor_version)\nFROM modules AS m JOIN contextual_module AS context ON (m.uuid = context.uuid)\nWHERE\n m.module_ident < context.module_ident\nORDER BY revised DESC\nLIMIT 1\"\"\"",
",",
"(",
"ident_hash",
",",
")",
")",
"try",
":",
"previous_ident_hash",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"except",
"TypeError",
":",
"# NoneType",
"previous_ident_hash",
"=",
"None",
"return",
"previous_ident_hash"
] |
Get the previous publication of the given
publication as an ident-hash.
|
[
"Get",
"the",
"previous",
"publication",
"of",
"the",
"given",
"publication",
"as",
"an",
"ident",
"-",
"hash",
"."
] |
python
|
valid
|
jgrassler/mkdocs-pandoc
|
mkdocs_pandoc/pandoc_converter.py
|
https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/pandoc_converter.py#L68-L96
|
def flatten_pages(self, pages, level=1):
"""Recursively flattens pages data structure into a one-dimensional data structure"""
flattened = []
for page in pages:
if type(page) is list:
flattened.append(
{
'file': page[0],
'title': page[1],
'level': level,
})
if type(page) is dict:
if type(list(page.values())[0]) is str:
flattened.append(
{
'file': list(page.values())[0],
'title': list(page.keys())[0],
'level': level,
})
if type(list(page.values())[0]) is list:
flattened.extend(
self.flatten_pages(
list(page.values())[0],
level + 1)
)
return flattened
|
[
"def",
"flatten_pages",
"(",
"self",
",",
"pages",
",",
"level",
"=",
"1",
")",
":",
"flattened",
"=",
"[",
"]",
"for",
"page",
"in",
"pages",
":",
"if",
"type",
"(",
"page",
")",
"is",
"list",
":",
"flattened",
".",
"append",
"(",
"{",
"'file'",
":",
"page",
"[",
"0",
"]",
",",
"'title'",
":",
"page",
"[",
"1",
"]",
",",
"'level'",
":",
"level",
",",
"}",
")",
"if",
"type",
"(",
"page",
")",
"is",
"dict",
":",
"if",
"type",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"is",
"str",
":",
"flattened",
".",
"append",
"(",
"{",
"'file'",
":",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"'title'",
":",
"list",
"(",
"page",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
",",
"'level'",
":",
"level",
",",
"}",
")",
"if",
"type",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"is",
"list",
":",
"flattened",
".",
"extend",
"(",
"self",
".",
"flatten_pages",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"level",
"+",
"1",
")",
")",
"return",
"flattened"
] |
Recursively flattens pages data structure into a one-dimensional data structure
|
[
"Recursively",
"flattens",
"pages",
"data",
"structure",
"into",
"a",
"one",
"-",
"dimensional",
"data",
"structure"
] |
python
|
train
|
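The pages argument is the nested mkdocs nav layout; the input below is hypothetical, and the expected output is what the recursion above would produce for it.

# Hypothetical nested pages structure mixing list and dict entries.
pages = [
    ['index.md', 'Home'],
    {'User Guide': [
        {'Install': 'guide/install.md'},
        {'Usage': 'guide/usage.md'},
    ]},
]

# flatten_pages(pages) would yield one dict per page, with its nesting level:
expected = [
    {'file': 'index.md', 'title': 'Home', 'level': 1},
    {'file': 'guide/install.md', 'title': 'Install', 'level': 2},
    {'file': 'guide/usage.md', 'title': 'Usage', 'level': 2},
]
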
OCHA-DAP/hdx-python-api
|
src/hdx/data/dataset.py
|
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L202-L216
|
def delete_resource(self, resource, delete=True):
# type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool
"""Delete a resource from the dataset and also from HDX by default
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
            delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.
Returns:
bool: True if resource removed or False if not
"""
if isinstance(resource, str):
if is_valid_uuid(resource) is False:
raise HDXError('%s is not a valid resource id!' % resource)
return self._remove_hdxobject(self.resources, resource, delete=delete)
|
[
"def",
"delete_resource",
"(",
"self",
",",
"resource",
",",
"delete",
"=",
"True",
")",
":",
"# type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool",
"if",
"isinstance",
"(",
"resource",
",",
"str",
")",
":",
"if",
"is_valid_uuid",
"(",
"resource",
")",
"is",
"False",
":",
"raise",
"HDXError",
"(",
"'%s is not a valid resource id!'",
"%",
"resource",
")",
"return",
"self",
".",
"_remove_hdxobject",
"(",
"self",
".",
"resources",
",",
"resource",
",",
"delete",
"=",
"delete",
")"
] |
Delete a resource from the dataset and also from HDX by default
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
    delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.
Returns:
bool: True if resource removed or False if not
|
[
"Delete",
"a",
"resource",
"from",
"the",
"dataset",
"and",
"also",
"from",
"HDX",
"by",
"default"
] |
python
|
train
|
MrYsLab/pymata-aio
|
pymata_aio/pymata3.py
|
https://github.com/MrYsLab/pymata-aio/blob/015081a4628b9d47dfe3f8d6c698ff903f107810/pymata_aio/pymata3.py#L623-L630
|
def shutdown(self):
"""
Shutdown the application and exit
:returns: No return value
"""
task = asyncio.ensure_future(self.core.shutdown())
self.loop.run_until_complete(task)
|
[
"def",
"shutdown",
"(",
"self",
")",
":",
"task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"core",
".",
"shutdown",
"(",
")",
")",
"self",
".",
"loop",
".",
"run_until_complete",
"(",
"task",
")"
] |
Shutdown the application and exit
:returns: No return value
|
[
"Shutdown",
"the",
"application",
"and",
"exit"
] |
python
|
train
|
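The ensure_future plus run_until_complete pairing drives a single coroutine to completion on a non-running loop; the coroutine below is a stand-in for core.shutdown(), which is not shown in this row.

import asyncio

async def shutdown_core():
    # Placeholder for the real cleanup coroutine.
    await asyncio.sleep(0)
    print('core shut down')

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
task = asyncio.ensure_future(shutdown_core())  # schedule on the current loop
loop.run_until_complete(task)                  # block until the coroutine finishes
loop.close()
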
readbeyond/aeneas
|
aeneas/syncmap/fragmentlist.py
|
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/syncmap/fragmentlist.py#L418-L505
|
def fragments_ending_inside_nonspeech_intervals(
self,
nonspeech_intervals,
tolerance
):
"""
Determine a list of pairs (nonspeech interval, fragment index),
such that the nonspeech interval contains exactly one fragment
ending inside it (within the given tolerance) and
adjacent to the next fragment.
:param nonspeech_intervals: the list of nonspeech intervals to be examined
:type nonspeech_intervals: list of :class:`~aeneas.exacttiming.TimeInterval`
:param tolerance: the tolerance to be applied when checking if the end point
falls within a given nonspeech interval
:type tolerance: :class:`~aeneas.exacttiming.TimeValue`
:rtype: list of (:class:`~aeneas.exacttiming.TimeInterval`, int)
"""
self.log(u"Called fragments_ending_inside_nonspeech_intervals")
self.log([u" List begin: %.3f", self.begin])
self.log([u" List end: %.3f", self.end])
nsi_index = 0
frag_index = 0
nsi_counter = [(n, []) for n in nonspeech_intervals]
# NOTE the last fragment is not eligible to be returned
while (nsi_index < len(nonspeech_intervals)) and (frag_index < len(self) - 1):
nsi = nonspeech_intervals[nsi_index]
if nsi.end > self.end:
self.log(u" nsi ends after self.end => breaking")
break
nsi_shadow = nsi.shadow(tolerance)
frag = self[frag_index]
self.log([u" nsi %s", nsi])
self.log([u" nsi_shadow %s", nsi_shadow])
self.log([u" frag %s", frag.interval])
if not frag.is_head_or_tail:
self.log(u" Fragment is not HEAD or TAIL => inspecting it")
if nsi_shadow.contains(frag.end):
if nsi_shadow.contains(frag.begin):
#
# *************** nsi shadow
# | *********** | nsi
# | ***X | frag (X=frag.end)
#
# NOTE this case might happen as the following:
#
# *************** nsi shadow
# | *** | nsi
# | **X | frag (X=frag.end)
#
# so we must invalidate the nsi if this happens
#
nsi_counter[nsi_index] = (None, [])
nsi_index += 1
frag_index += 1
self.log(u" nsi_shadow entirely contains frag => invalidate nsi, and skip to next fragment, nsi")
else:
#
# *************** nsi shadow
# | *********** | nsi
# *****|***X | frag (X=frag.end)
#
nsi_counter[nsi_index][1].append(frag_index)
frag_index += 1
self.log(u" nsi_shadow contains frag end only => save it and go to next fragment")
elif nsi_shadow.begin > frag.end:
#
# *************** nsi shadow
# | *********** | nsi
# **X | | frag (X=frag.end)
#
frag_index += 1
self.log(u" nsi_shadow begins after frag end => skip to next fragment")
else:
#
# *************** nsi shadow
# | *********** | nsi
# | *****|**X frag (X=frag.end)
#
nsi_index += 1
self.log(u" nsi_shadow ends before frag end => skip to next nsi")
else:
self.log(u" Fragment is HEAD or TAIL => skipping it")
frag_index += 1
self.log(u"")
tbr = [(n, c[0]) for (n, c) in nsi_counter if len(c) == 1]
self.log([u"Returning: %s", tbr])
return tbr
|
[
"def",
"fragments_ending_inside_nonspeech_intervals",
"(",
"self",
",",
"nonspeech_intervals",
",",
"tolerance",
")",
":",
"self",
".",
"log",
"(",
"u\"Called fragments_ending_inside_nonspeech_intervals\"",
")",
"self",
".",
"log",
"(",
"[",
"u\" List begin: %.3f\"",
",",
"self",
".",
"begin",
"]",
")",
"self",
".",
"log",
"(",
"[",
"u\" List end: %.3f\"",
",",
"self",
".",
"end",
"]",
")",
"nsi_index",
"=",
"0",
"frag_index",
"=",
"0",
"nsi_counter",
"=",
"[",
"(",
"n",
",",
"[",
"]",
")",
"for",
"n",
"in",
"nonspeech_intervals",
"]",
"# NOTE the last fragment is not eligible to be returned",
"while",
"(",
"nsi_index",
"<",
"len",
"(",
"nonspeech_intervals",
")",
")",
"and",
"(",
"frag_index",
"<",
"len",
"(",
"self",
")",
"-",
"1",
")",
":",
"nsi",
"=",
"nonspeech_intervals",
"[",
"nsi_index",
"]",
"if",
"nsi",
".",
"end",
">",
"self",
".",
"end",
":",
"self",
".",
"log",
"(",
"u\" nsi ends after self.end => breaking\"",
")",
"break",
"nsi_shadow",
"=",
"nsi",
".",
"shadow",
"(",
"tolerance",
")",
"frag",
"=",
"self",
"[",
"frag_index",
"]",
"self",
".",
"log",
"(",
"[",
"u\" nsi %s\"",
",",
"nsi",
"]",
")",
"self",
".",
"log",
"(",
"[",
"u\" nsi_shadow %s\"",
",",
"nsi_shadow",
"]",
")",
"self",
".",
"log",
"(",
"[",
"u\" frag %s\"",
",",
"frag",
".",
"interval",
"]",
")",
"if",
"not",
"frag",
".",
"is_head_or_tail",
":",
"self",
".",
"log",
"(",
"u\" Fragment is not HEAD or TAIL => inspecting it\"",
")",
"if",
"nsi_shadow",
".",
"contains",
"(",
"frag",
".",
"end",
")",
":",
"if",
"nsi_shadow",
".",
"contains",
"(",
"frag",
".",
"begin",
")",
":",
"#",
"# *************** nsi shadow",
"# | *********** | nsi",
"# | ***X | frag (X=frag.end)",
"#",
"# NOTE this case might happen as the following:",
"#",
"# *************** nsi shadow",
"# | *** | nsi",
"# | **X | frag (X=frag.end)",
"#",
"# so we must invalidate the nsi if this happens",
"#",
"nsi_counter",
"[",
"nsi_index",
"]",
"=",
"(",
"None",
",",
"[",
"]",
")",
"nsi_index",
"+=",
"1",
"frag_index",
"+=",
"1",
"self",
".",
"log",
"(",
"u\" nsi_shadow entirely contains frag => invalidate nsi, and skip to next fragment, nsi\"",
")",
"else",
":",
"#",
"# *************** nsi shadow",
"# | *********** | nsi",
"# *****|***X | frag (X=frag.end)",
"#",
"nsi_counter",
"[",
"nsi_index",
"]",
"[",
"1",
"]",
".",
"append",
"(",
"frag_index",
")",
"frag_index",
"+=",
"1",
"self",
".",
"log",
"(",
"u\" nsi_shadow contains frag end only => save it and go to next fragment\"",
")",
"elif",
"nsi_shadow",
".",
"begin",
">",
"frag",
".",
"end",
":",
"#",
"# *************** nsi shadow",
"# | *********** | nsi",
"# **X | | frag (X=frag.end)",
"#",
"frag_index",
"+=",
"1",
"self",
".",
"log",
"(",
"u\" nsi_shadow begins after frag end => skip to next fragment\"",
")",
"else",
":",
"#",
"# *************** nsi shadow",
"# | *********** | nsi",
"# | *****|**X frag (X=frag.end)",
"#",
"nsi_index",
"+=",
"1",
"self",
".",
"log",
"(",
"u\" nsi_shadow ends before frag end => skip to next nsi\"",
")",
"else",
":",
"self",
".",
"log",
"(",
"u\" Fragment is HEAD or TAIL => skipping it\"",
")",
"frag_index",
"+=",
"1",
"self",
".",
"log",
"(",
"u\"\"",
")",
"tbr",
"=",
"[",
"(",
"n",
",",
"c",
"[",
"0",
"]",
")",
"for",
"(",
"n",
",",
"c",
")",
"in",
"nsi_counter",
"if",
"len",
"(",
"c",
")",
"==",
"1",
"]",
"self",
".",
"log",
"(",
"[",
"u\"Returning: %s\"",
",",
"tbr",
"]",
")",
"return",
"tbr"
] |
Determine a list of pairs (nonspeech interval, fragment index),
such that the nonspeech interval contains exactly one fragment
ending inside it (within the given tolerance) and
adjacent to the next fragment.
:param nonspeech_intervals: the list of nonspeech intervals to be examined
:type nonspeech_intervals: list of :class:`~aeneas.exacttiming.TimeInterval`
:param tolerance: the tolerance to be applied when checking if the end point
falls within a given nonspeech interval
:type tolerance: :class:`~aeneas.exacttiming.TimeValue`
:rtype: list of (:class:`~aeneas.exacttiming.TimeInterval`, int)
|
[
"Determine",
"a",
"list",
"of",
"pairs",
"(",
"nonspeech",
"interval",
"fragment",
"index",
")",
"such",
"that",
"the",
"nonspeech",
"interval",
"contains",
"exactly",
"one",
"fragment",
"ending",
"inside",
"it",
"(",
"within",
"the",
"given",
"tolerance",
")",
"and",
"adjacent",
"to",
"the",
"next",
"fragment",
"."
] |
python
|
train
|
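A brief, hypothetical usage sketch for the aeneas record above; frag_list (a populated fragment list) and nsi_list (nonspeech TimeInterval objects from a VAD pass) are placeholders, and only the TimeValue import is taken from the record's own type hints.

from aeneas.exacttiming import TimeValue

# placeholders: frag_list is a SyncMapFragmentList, nsi_list a list of TimeInterval
pairs = frag_list.fragments_ending_inside_nonspeech_intervals(
    nonspeech_intervals=nsi_list,
    tolerance=TimeValue("0.050"),
)
for interval, frag_index in pairs:
    # each returned nonspeech interval contains exactly one eligible fragment end
    print(interval, frag_index)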
wandb/client
|
wandb/apis/file_stream.py
|
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/file_stream.py#L208-L216
|
def push(self, filename, data):
"""Push a chunk of a file to the streaming endpoint.
Args:
filename: Name of file that this is a chunk of.
chunk_id: TODO: change to 'offset'
chunk: File data.
"""
self._queue.put(Chunk(filename, data))
|
[
"def",
"push",
"(",
"self",
",",
"filename",
",",
"data",
")",
":",
"self",
".",
"_queue",
".",
"put",
"(",
"Chunk",
"(",
"filename",
",",
"data",
")",
")"
] |
Push a chunk of a file to the streaming endpoint.
Args:
filename: Name of file that this is a chunk of.
chunk_id: TODO: change to 'offset'
chunk: File data.
|
[
"Push",
"a",
"chunk",
"of",
"a",
"file",
"to",
"the",
"streaming",
"endpoint",
"."
] |
python
|
train
|
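The push record above only enqueues work; a background worker drains the queue and talks to the endpoint. The following self-contained sketch shows that producer/consumer shape with a toy worker that prints instead of POSTing; Chunk, FileStreamSketch and flush are illustrative names, not wandb's actual classes.

import queue
import threading
from collections import namedtuple

Chunk = namedtuple("Chunk", ("filename", "data"))

class FileStreamSketch:
    def __init__(self):
        self._queue = queue.Queue()
        threading.Thread(target=self._drain, daemon=True).start()

    def push(self, filename, data):
        # enqueue a chunk; the background thread decides when to send it
        self._queue.put(Chunk(filename, data))

    def flush(self):
        self._queue.join()

    def _drain(self):
        while True:
            chunk = self._queue.get()
            # a real client would batch chunks and POST them to the streaming endpoint
            print("would send %d bytes of %s" % (len(chunk.data), chunk.filename))
            self._queue.task_done()

fs = FileStreamSketch()
fs.push("output.log", "hello\n")
fs.flush()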
liamw9534/bt-manager
|
bt_manager/audio.py
|
https://github.com/liamw9534/bt-manager/blob/51be2919394ce8134c698359649bfad09eedf4ec/bt_manager/audio.py#L184-L191
|
def _transport_ready_handler(self, fd, cb_condition):
"""
Wrapper for calling user callback routine to notify
when transport data is ready to read
"""
if(self.user_cb):
self.user_cb(self.user_arg)
return True
|
[
"def",
"_transport_ready_handler",
"(",
"self",
",",
"fd",
",",
"cb_condition",
")",
":",
"if",
"(",
"self",
".",
"user_cb",
")",
":",
"self",
".",
"user_cb",
"(",
"self",
".",
"user_arg",
")",
"return",
"True"
] |
Wrapper for calling user callback routine to notify
when transport data is ready to read
|
[
"Wrapper",
"for",
"calling",
"user",
"callback",
"routine",
"to",
"notify",
"when",
"transport",
"data",
"is",
"ready",
"to",
"read"
] |
python
|
train
|
KE-works/pykechain
|
pykechain/models/activity2.py
|
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/activity2.py#L241-L266
|
def siblings(self, **kwargs):
"""Retrieve the other activities that also belong to the parent.
It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on the level of the current task, including
itself. This also works if the activity is of type `ActivityType.PROCESS`.
:param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
:type kwargs: dict or None
:return: list of :class:`Activity2`
:raises NotFoundError: when it is a task in the top level of a project
Example
-------
>>> task = project.activity('Some Task')
>>> siblings = task.siblings()
Example for siblings containing certain words in the task name
>>> task = project.activity('Some Task')
>>> siblings = task.siblings(name__contains='Another Task')
"""
parent_id = self._json_data.get('parent_id')
if parent_id is None:
raise NotFoundError("Cannot find subprocess for this task '{}', "
"as this task exist on top level.".format(self.name))
return self._client.activities(parent_id=parent_id, scope=self.scope_id, **kwargs)
|
[
"def",
"siblings",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"parent_id",
"=",
"self",
".",
"_json_data",
".",
"get",
"(",
"'parent_id'",
")",
"if",
"parent_id",
"is",
"None",
":",
"raise",
"NotFoundError",
"(",
"\"Cannot find subprocess for this task '{}', \"",
"\"as this task exist on top level.\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"self",
".",
"_client",
".",
"activities",
"(",
"parent_id",
"=",
"parent_id",
",",
"scope",
"=",
"self",
".",
"scope_id",
",",
"*",
"*",
"kwargs",
")"
] |
Retrieve the other activities that also belong to the parent.
It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on the level of the current task, including
itself. This also works if the activity is of type `ActivityType.PROCESS`.
:param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
:type kwargs: dict or None
:return: list of :class:`Activity2`
:raises NotFoundError: when it is a task in the top level of a project
Example
-------
>>> task = project.activity('Some Task')
>>> siblings = task.siblings()
Example for siblings containing certain words in the task name
>>> task = project.activity('Some Task')
>>> siblings = task.siblings(name__contains='Another Task')
|
[
"Retrieve",
"the",
"other",
"activities",
"that",
"also",
"belong",
"to",
"the",
"parent",
"."
] |
python
|
train
|
niklasf/python-chess
|
chess/pgn.py
|
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/pgn.py#L277-L279
|
def remove_variation(self, move: chess.Move) -> None:
"""Removes a variation."""
self.variations.remove(self.variation(move))
|
[
"def",
"remove_variation",
"(",
"self",
",",
"move",
":",
"chess",
".",
"Move",
")",
"->",
"None",
":",
"self",
".",
"variations",
".",
"remove",
"(",
"self",
".",
"variation",
"(",
"move",
")",
")"
] |
Removes a variation.
|
[
"Removes",
"a",
"variation",
"."
] |
python
|
train
|
pypa/pipenv
|
pipenv/vendor/pyparsing.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L2277-L2285
|
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
|
[
"def",
"setDebugActions",
"(",
"self",
",",
"startAction",
",",
"successAction",
",",
"exceptionAction",
")",
":",
"self",
".",
"debugActions",
"=",
"(",
"startAction",
"or",
"_defaultStartDebugAction",
",",
"successAction",
"or",
"_defaultSuccessDebugAction",
",",
"exceptionAction",
"or",
"_defaultExceptionDebugAction",
")",
"self",
".",
"debug",
"=",
"True",
"return",
"self"
] |
Enable display of debugging messages while doing pattern matching.
|
[
"Enable",
"display",
"of",
"debugging",
"messages",
"while",
"doing",
"pattern",
"matching",
"."
] |
python
|
train
|
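A hedged usage sketch for setDebugActions: the three callbacks mirror the signatures of pyparsing's default debug actions (try, success, failure); if your pyparsing version differs, treat those signatures as an assumption.

from pyparsing import Word, nums

def start_action(instring, loc, expr):
    print("trying %r at %d" % (expr, loc))

def success_action(instring, startloc, endloc, expr, toks):
    print("matched %r -> %s" % (expr, toks))

def exception_action(instring, loc, expr, exc):
    print("failed %r: %s" % (expr, exc))

integer = Word(nums).setDebugActions(start_action, success_action, exception_action)
integer.parseString("42")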
nerdynick/PySQLPool
|
src/PySQLPool/pool.py
|
https://github.com/nerdynick/PySQLPool/blob/a561275fea091e2667b69ce376c507f541b56e7d/src/PySQLPool/pool.py#L102-L123
|
def Commit(self):
"""
Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008
"""
self.lock.acquire()
try:
for bucket in self.connections.values():
try:
for conn in bucket:
conn.lock()
try:
conn.commit()
conn.release()
except Exception:
conn.release()
except Exception:
pass
finally:
self.lock.release()
|
[
"def",
"Commit",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"for",
"bucket",
"in",
"self",
".",
"connections",
".",
"values",
"(",
")",
":",
"try",
":",
"for",
"conn",
"in",
"bucket",
":",
"conn",
".",
"lock",
"(",
")",
"try",
":",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"release",
"(",
")",
"except",
"Exception",
":",
"conn",
".",
"release",
"(",
")",
"except",
"Exception",
":",
"pass",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")"
] |
Commits all currently open connections
@author: Nick Verbeck
@since: 9/12/2008
|
[
"Commits",
"all",
"currently",
"open",
"connections"
] |
python
|
train
|
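The Commit record above walks every bucket of pooled connections under the pool lock and commits each one, swallowing per-connection errors. A small stand-alone sketch of that shape, using sqlite3 connections and omitting the per-connection locking of the original:

import sqlite3
import threading

class TinyPool:
    def __init__(self):
        self.lock = threading.Lock()
        # buckets of open connections, keyed by connection info
        self.connections = {"default": [sqlite3.connect(":memory:")]}

    def commit_all(self):
        with self.lock:
            for bucket in self.connections.values():
                for conn in bucket:
                    try:
                        conn.commit()
                    except Exception:
                        pass  # mirror the original: a failed commit is ignored

TinyPool().commit_all()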
bykof/billomapy
|
billomapy/billomapy.py
|
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L331-L344
|
def get_all_client_properties(self, params=None):
"""
Get all contacts of client
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
"""
return self._iterate_through_pages(
get_function=self.get_client_properties_per_page,
resource=CLIENT_PROPERTIES,
**{'params': params}
)
|
[
"def",
"get_all_client_properties",
"(",
"self",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"_iterate_through_pages",
"(",
"get_function",
"=",
"self",
".",
"get_client_properties_per_page",
",",
"resource",
"=",
"CLIENT_PROPERTIES",
",",
"*",
"*",
"{",
"'params'",
":",
"params",
"}",
")"
] |
Get all contacts of client
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an Exception and you will get nothing
:param params: search params
:return: list
|
[
"Get",
"all",
"contacts",
"of",
"client",
"This",
"will",
"iterate",
"over",
"all",
"pages",
"until",
"it",
"gets",
"all",
"elements",
".",
"So",
"if",
"the",
"rate",
"limit",
"exceeded",
"it",
"will",
"throw",
"an",
"Exception",
"and",
"you",
"will",
"get",
"nothing"
] |
python
|
train
|
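The billomapy record above delegates to an internal pager. The real _iterate_through_pages reads the API's paging counters, so the following is only a generic sketch of the keep-fetching-until-empty pattern, with a fake page source standing in for get_client_properties_per_page.

def iterate_through_pages(get_page, resource, params=None):
    items, page = [], 1
    while True:
        data = get_page(page=page, params=params)
        chunk = data.get(resource, [])
        if not chunk:
            return items
        items.extend(chunk)
        page += 1

def fake_page(page, params=None):
    pages = {1: {"client-property": [{"id": 1}, {"id": 2}]},
             2: {"client-property": [{"id": 3}]}}
    return pages.get(page, {})

print(iterate_through_pages(fake_page, "client-property"))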
xeroc/python-graphenelib
|
graphenestorage/masterpassword.py
|
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenestorage/masterpassword.py#L155-L161
|
def change_password(self, newpassword):
""" Change the password that allows to decrypt the master key
"""
if not self.unlocked():
raise WalletLocked
self.password = newpassword
self._save_encrypted_masterpassword()
|
[
"def",
"change_password",
"(",
"self",
",",
"newpassword",
")",
":",
"if",
"not",
"self",
".",
"unlocked",
"(",
")",
":",
"raise",
"WalletLocked",
"self",
".",
"password",
"=",
"newpassword",
"self",
".",
"_save_encrypted_masterpassword",
"(",
")"
] |
Change the password that allows to decrypt the master key
|
[
"Change",
"the",
"password",
"that",
"allows",
"to",
"decrypt",
"the",
"master",
"key"
] |
python
|
valid
|
Netflix-Skunkworks/historical
|
historical/security_group/collector.py
|
https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/security_group/collector.py#L95-L119
|
def create_delete_model(record):
"""Create a security group model from a record."""
data = cloudwatch.get_historical_base_info(record)
group_id = cloudwatch.filter_request_parameters('groupId', record)
# vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
# group_name = cloudwatch.filter_request_parameters('groupName', record)
arn = get_arn(group_id, cloudwatch.get_region(record), record['account'])
LOG.debug(f'[-] Deleting Dynamodb Records. Hash Key: {arn}')
# Tombstone these records so that the deletion event time can be accurately tracked.
data.update({'configuration': {}})
items = list(CurrentSecurityGroupModel.query(arn, limit=1))
if items:
model_dict = items[0].__dict__['attribute_values'].copy()
model_dict.update(data)
model = CurrentSecurityGroupModel(**model_dict)
model.save()
return model
return None
|
[
"def",
"create_delete_model",
"(",
"record",
")",
":",
"data",
"=",
"cloudwatch",
".",
"get_historical_base_info",
"(",
"record",
")",
"group_id",
"=",
"cloudwatch",
".",
"filter_request_parameters",
"(",
"'groupId'",
",",
"record",
")",
"# vpc_id = cloudwatch.filter_request_parameters('vpcId', record)",
"# group_name = cloudwatch.filter_request_parameters('groupName', record)",
"arn",
"=",
"get_arn",
"(",
"group_id",
",",
"cloudwatch",
".",
"get_region",
"(",
"record",
")",
",",
"record",
"[",
"'account'",
"]",
")",
"LOG",
".",
"debug",
"(",
"f'[-] Deleting Dynamodb Records. Hash Key: {arn}'",
")",
"# Tombstone these records so that the deletion event time can be accurately tracked.",
"data",
".",
"update",
"(",
"{",
"'configuration'",
":",
"{",
"}",
"}",
")",
"items",
"=",
"list",
"(",
"CurrentSecurityGroupModel",
".",
"query",
"(",
"arn",
",",
"limit",
"=",
"1",
")",
")",
"if",
"items",
":",
"model_dict",
"=",
"items",
"[",
"0",
"]",
".",
"__dict__",
"[",
"'attribute_values'",
"]",
".",
"copy",
"(",
")",
"model_dict",
".",
"update",
"(",
"data",
")",
"model",
"=",
"CurrentSecurityGroupModel",
"(",
"*",
"*",
"model_dict",
")",
"model",
".",
"save",
"(",
")",
"return",
"model",
"return",
"None"
] |
Create a security group model from a record.
|
[
"Create",
"a",
"security",
"group",
"model",
"from",
"a",
"record",
"."
] |
python
|
train
|
gabstopper/smc-python
|
smc/elements/group.py
|
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/group.py#L214-L230
|
def create(cls, name, members=None, comment=None):
"""
Create the TCP Service group
:param str name: name of tcp service group
:param list element: tcp services by element or href
:type element: list(str,Element)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: TCPServiceGroup
"""
element = [] if members is None else element_resolver(members)
json = {'name': name,
'element': element,
'comment': comment}
return ElementCreator(cls, json)
|
[
"def",
"create",
"(",
"cls",
",",
"name",
",",
"members",
"=",
"None",
",",
"comment",
"=",
"None",
")",
":",
"element",
"=",
"[",
"]",
"if",
"members",
"is",
"None",
"else",
"element_resolver",
"(",
"members",
")",
"json",
"=",
"{",
"'name'",
":",
"name",
",",
"'element'",
":",
"element",
",",
"'comment'",
":",
"comment",
"}",
"return",
"ElementCreator",
"(",
"cls",
",",
"json",
")"
] |
Create the TCP Service group
:param str name: name of tcp service group
:param list element: tcp services by element or href
:type element: list(str,Element)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: TCPServiceGroup
|
[
"Create",
"the",
"TCP",
"Service",
"group"
] |
python
|
train
|
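A hedged usage sketch for the TCPServiceGroup record above; the service names and the TCPService import path are assumptions, and members may equally be plain hrefs per the docstring.

from smc.elements.group import TCPServiceGroup
from smc.elements.service import TCPService  # assumed import path

group = TCPServiceGroup.create(
    name="web-services",
    members=[TCPService("HTTP"), TCPService("HTTPS")],
    comment="ports used by the web tier",
)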
Julian/jsonschema
|
jsonschema/_utils.py
|
https://github.com/Julian/jsonschema/blob/a72332004cdc3ba456de7918bc32059822b2f69a/jsonschema/_utils.py#L109-L119
|
def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
if len(extras) == 1:
verb = "was"
else:
verb = "were"
return ", ".join(repr(extra) for extra in extras), verb
|
[
"def",
"extras_msg",
"(",
"extras",
")",
":",
"if",
"len",
"(",
"extras",
")",
"==",
"1",
":",
"verb",
"=",
"\"was\"",
"else",
":",
"verb",
"=",
"\"were\"",
"return",
"\", \"",
".",
"join",
"(",
"repr",
"(",
"extra",
")",
"for",
"extra",
"in",
"extras",
")",
",",
"verb"
] |
Create an error message for extra items or properties.
|
[
"Create",
"an",
"error",
"message",
"for",
"extra",
"items",
"or",
"properties",
"."
] |
python
|
train
|
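A quick usage example for extras_msg, showing how the returned (joined names, verb) pair slots into an error message; the import path comes from the record itself, though _utils is a private module.

from jsonschema._utils import extras_msg

print("%s %s unexpected" % extras_msg(["color"]))          # 'color' was unexpected
print("%s %s unexpected" % extras_msg(["color", "size"]))  # 'color', 'size' were unexpected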
HPENetworking/PYHPEIMC
|
archived/pyhpimc.py
|
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/archived/pyhpimc.py#L620-L639
|
def set_inteface_up(devid, ifindex):
"""
function takes devid and ifindex of a specific device and interface and issues a RESTful call to "undo shut" the
specified interface on the target device.
:param devid: int or str value of the target device
:param ifindex: int or str value of the target interface
:return: HTTP status code 204 with no values.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
set_int_up_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/up"
f_url = url + set_int_up_url
payload = None
r = requests.put(f_url, auth=auth,
headers=headers) # creates the URL using the payload variable as the contents
print(r.status_code)
if r.status_code == 204:
return r.status_code
else:
print("An Error has occured")
|
[
"def",
"set_inteface_up",
"(",
"devid",
",",
"ifindex",
")",
":",
"if",
"auth",
"is",
"None",
"or",
"url",
"is",
"None",
":",
"# checks to see if the imc credentials are already available",
"set_imc_creds",
"(",
")",
"set_int_up_url",
"=",
"\"/imcrs/plat/res/device/\"",
"+",
"str",
"(",
"devid",
")",
"+",
"\"/interface/\"",
"+",
"str",
"(",
"ifindex",
")",
"+",
"\"/up\"",
"f_url",
"=",
"url",
"+",
"set_int_up_url",
"payload",
"=",
"None",
"r",
"=",
"requests",
".",
"put",
"(",
"f_url",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"headers",
")",
"# creates the URL using the payload variable as the contents",
"print",
"(",
"r",
".",
"status_code",
")",
"if",
"r",
".",
"status_code",
"==",
"204",
":",
"return",
"r",
".",
"status_code",
"else",
":",
"print",
"(",
"\"An Error has occured\"",
")"
] |
function takes devid and ifindex of a specific device and interface and issues a RESTful call to "undo shut" the
specified interface on the target device.
:param devid: int or str value of the target device
:param ifindex: int or str value of the target interface
:return: HTTP status code 204 with no values.
|
[
"function",
"takest",
"devid",
"and",
"ifindex",
"of",
"specific",
"device",
"and",
"interface",
"and",
"issues",
"a",
"RESTFUL",
"call",
"to",
"undo",
"shut",
"the",
"spec",
"ified",
"interface",
"on",
"the",
"target",
"device",
".",
":",
"param",
"devid",
":",
"int",
"or",
"str",
"value",
"of",
"the",
"target",
"device",
":",
"param",
"ifindex",
":",
"int",
"or",
"str",
"value",
"of",
"the",
"target",
"interface",
":",
"return",
":",
"HTTP",
"status",
"code",
"204",
"with",
"no",
"values",
"."
] |
python
|
train
|
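A hypothetical usage sketch for the set_inteface_up record above; the device id and ifindex values are placeholders, and the call assumes the module-level auth/url credentials have already been set.

# placeholders: device 312, interface index 3
status = set_inteface_up(312, 3)
print(status)  # 204 on success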
rossant/ipymd
|
ipymd/lib/opendocument.py
|
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/lib/opendocument.py#L742-L769
|
def _item_type(item):
"""Indicate to the ODF reader the type of the block or text."""
tag = item['tag']
style = item.get('style', None)
if tag == 'p':
if style is None or 'paragraph' in style:
return 'paragraph'
else:
return style
elif tag == 'span':
if style in (None, 'normal-text'):
return 'text'
elif style == 'url':
return 'link'
else:
return style
elif tag == 'h':
assert style is not None
return style
elif tag in ('list', 'list-item', 'line-break'):
if style == '_numbered_list':
return 'numbered-list'
else:
return tag
elif tag == 's':
return 'spaces'
raise Exception("The tag '{0}' with style '{1}' hasn't "
"been implemented.".format(tag, style))
|
[
"def",
"_item_type",
"(",
"item",
")",
":",
"tag",
"=",
"item",
"[",
"'tag'",
"]",
"style",
"=",
"item",
".",
"get",
"(",
"'style'",
",",
"None",
")",
"if",
"tag",
"==",
"'p'",
":",
"if",
"style",
"is",
"None",
"or",
"'paragraph'",
"in",
"style",
":",
"return",
"'paragraph'",
"else",
":",
"return",
"style",
"elif",
"tag",
"==",
"'span'",
":",
"if",
"style",
"in",
"(",
"None",
",",
"'normal-text'",
")",
":",
"return",
"'text'",
"elif",
"style",
"==",
"'url'",
":",
"return",
"'link'",
"else",
":",
"return",
"style",
"elif",
"tag",
"==",
"'h'",
":",
"assert",
"style",
"is",
"not",
"None",
"return",
"style",
"elif",
"tag",
"in",
"(",
"'list'",
",",
"'list-item'",
",",
"'line-break'",
")",
":",
"if",
"style",
"==",
"'_numbered_list'",
":",
"return",
"'numbered-list'",
"else",
":",
"return",
"tag",
"elif",
"tag",
"==",
"'s'",
":",
"return",
"'spaces'",
"raise",
"Exception",
"(",
"\"The tag '{0}' with style '{1}' hasn't \"",
"\"been implemented.\"",
".",
"format",
"(",
"tag",
",",
"style",
")",
")"
] |
Indicate to the ODF reader the type of the block or text.
|
[
"Indicate",
"to",
"the",
"ODF",
"reader",
"the",
"type",
"of",
"the",
"block",
"or",
"text",
"."
] |
python
|
train
|
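A few representative calls to the _item_type helper above (assumed to be in scope), showing the tag/style combinations it maps to block or text types:

print(_item_type({"tag": "p"}))                                # paragraph
print(_item_type({"tag": "span", "style": "url"}))             # link
print(_item_type({"tag": "list", "style": "_numbered_list"}))  # numbered-list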
Esri/ArcREST
|
src/arcrest/manageags/_system.py
|
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_system.py#L285-L325
|
def registerWebAdaptor(self, webAdaptorURL, machineName, machineIP,
isAdminEnabled, description, httpPort, httpsPort):
"""
You can use this operation to register the ArcGIS Web Adaptor
from your ArcGIS Server. By registering the Web Adaptor with the server,
you are telling the server to trust requests (including security
credentials) that have been submitted through this Web Adaptor.
Inputs:
webAdaptorURL - The URL of the web adaptor through which ArcGIS
resources will be accessed.
machineName - The machine name on which the web adaptor is installed.
machineIP - The local IP address of the machine on which the web
adaptor is installed.
isAdminEnabled - A boolean flag to indicate if administrative access
is allowed through the web adaptor. The default is
false.
description - An optional description for the web adaptor.
httpPort - An optional parameter to indicate the HTTP port of the
web adaptor. If this parameter is not provided, it
is derived from the URL.
httpsPort - An optional parameter to indicate the HTTPS port of the web
adaptor. If this parameter is not provided, it is
derived from the URL.
"""
url = self._url + "/webadaptors/register"
params = {
"f" : "json",
"webAdaptorURL" : webAdaptorURL,
"machineName" : machineName,
"machineIP" : machineIP,
"isAdminEnabled" : isAdminEnabled,
"description" : description,
"httpPort" : httpPort,
"httpsPort" : httpsPort
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
|
[
"def",
"registerWebAdaptor",
"(",
"self",
",",
"webAdaptorURL",
",",
"machineName",
",",
"machineIP",
",",
"isAdminEnabled",
",",
"description",
",",
"httpPort",
",",
"httpsPort",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/webadaptors/register\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"webAdaptorURL\"",
":",
"webAdaptorURL",
",",
"\"machineName\"",
":",
"machineName",
",",
"\"machineIP\"",
":",
"machineIP",
",",
"\"isAdminEnabled\"",
":",
"isAdminEnabled",
",",
"\"description\"",
":",
"description",
",",
"\"httpPort\"",
":",
"httpPort",
",",
"\"httpsPort\"",
":",
"httpsPort",
"}",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
")"
] |
You can use this operation to register the ArcGIS Web Adaptor
from your ArcGIS Server. By registering the Web Adaptor with the server,
you are telling the server to trust requests (including security
credentials) that have been submitted through this Web Adaptor.
Inputs:
webAdaptorURL - The URL of the web adaptor through which ArcGIS
resources will be accessed.
machineName - The machine name on which the web adaptor is installed.
machineIP - The local IP address of the machine on which the web
adaptor is installed.
isAdminEnabled - A boolean flag to indicate if administrative access
is allowed through the web adaptor. The default is
false.
description - An optional description for the web adaptor.
httpPort - An optional parameter to indicate the HTTP port of the
web adaptor. If this parameter is not provided, it
is derived from the URL.
httpsPort - An optional parameter to indicate the HTTPS port of the web
adaptor. If this parameter is not provided, it is
derived from the URL.
|
[
"You",
"can",
"use",
"this",
"operation",
"to",
"register",
"the",
"ArcGIS",
"Web",
"Adaptor",
"from",
"your",
"ArcGIS",
"Server",
".",
"By",
"registering",
"the",
"Web",
"Adaptor",
"with",
"the",
"server",
"you",
"are",
"telling",
"the",
"server",
"to",
"trust",
"requests",
"(",
"including",
"security",
"credentials",
")",
"that",
"have",
"been",
"submitted",
"through",
"this",
"Web",
"Adaptor",
"."
] |
python
|
train
|
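A hypothetical usage sketch for registerWebAdaptor; 'system' stands for an already-authenticated arcrest manageags system object, and every value below is a placeholder.

resp = system.registerWebAdaptor(
    webAdaptorURL="https://webserver.example.com/arcgis",
    machineName="webserver.example.com",
    machineIP="203.0.113.10",
    isAdminEnabled=False,
    description="public web adaptor",
    httpPort=80,
    httpsPort=443,
)
print(resp)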
google/dotty
|
efilter/parsers/common/token_stream.py
|
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L101-L115
|
def expect(self, f, *args):
"""Like 'accept' but throws a parse error if 'f' doesn't match."""
match = self.accept(f, *args)
if match:
return match
try:
func_name = f.func_name
except AttributeError:
func_name = "<unnamed grammar function>"
start, end = self.current_position()
raise errors.EfilterParseError(
query=self.tokenizer.source, start=start, end=end,
message="Was expecting %s here." % (func_name))
|
[
"def",
"expect",
"(",
"self",
",",
"f",
",",
"*",
"args",
")",
":",
"match",
"=",
"self",
".",
"accept",
"(",
"f",
",",
"*",
"args",
")",
"if",
"match",
":",
"return",
"match",
"try",
":",
"func_name",
"=",
"f",
".",
"func_name",
"except",
"AttributeError",
":",
"func_name",
"=",
"\"<unnamed grammar function>\"",
"start",
",",
"end",
"=",
"self",
".",
"current_position",
"(",
")",
"raise",
"errors",
".",
"EfilterParseError",
"(",
"query",
"=",
"self",
".",
"tokenizer",
".",
"source",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"message",
"=",
"\"Was expecting %s here.\"",
"%",
"(",
"func_name",
")",
")"
] |
Like 'accept' but throws a parse error if 'f' doesn't match.
|
[
"Like",
"accept",
"but",
"throws",
"a",
"parse",
"error",
"if",
"f",
"doesn",
"t",
"match",
"."
] |
python
|
train
|
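The expect record above builds on an accept-style matcher: try the grammar function, and turn a non-match into a parse error that names the function. A generic, stand-alone sketch of that idiom (not EFILTER's actual classes):

class ParseError(Exception):
    pass

class TinyTokenStream:
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.position = 0

    def accept(self, predicate):
        if self.position < len(self.tokens) and predicate(self.tokens[self.position]):
            token = self.tokens[self.position]
            self.position += 1
            return token
        return None

    def expect(self, predicate):
        # like accept, but a non-match becomes a hard parse error
        match = self.accept(predicate)
        if match is not None:
            return match
        name = getattr(predicate, "__name__", "<unnamed grammar function>")
        raise ParseError("Was expecting %s at token %d." % (name, self.position))

def number(token):
    return token.isdigit()

stream = TinyTokenStream(["42", "+", "1"])
print(stream.expect(number))  # 42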