repo (string, 7-55 chars) | path (string, 4-223 chars) | url (string, 87-315 chars) | code (string, 75-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class) | partition (3 classes) | avg_line_len (float64, 7.91-980) |
---|---|---|---|---|---|---|---|---|---|
sibirrer/lenstronomy | lenstronomy/LensModel/lens_model.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/lens_model.py#L105-L117 | def alpha(self, x, y, kwargs, k=None):
"""
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
"""
return self.lens_model.alpha(x, y, kwargs, k=k) | [
"def",
"alpha",
"(",
"self",
",",
"x",
",",
"y",
",",
"kwargs",
",",
"k",
"=",
"None",
")",
":",
"return",
"self",
".",
"lens_model",
".",
"alpha",
"(",
"x",
",",
"y",
",",
"kwargs",
",",
"k",
"=",
"k",
")"
]
| deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec | [
"deflection",
"angles"
]
| python | train | 39.153846 |
pypa/setuptools | setuptools/msvc.py | https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/msvc.py#L411-L427 | def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
x86: str
Force x86 software registry.
Return
------
str: value
"""
node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key) | [
"def",
"microsoft",
"(",
"self",
",",
"key",
",",
"x86",
"=",
"False",
")",
":",
"node64",
"=",
"''",
"if",
"self",
".",
"pi",
".",
"current_is_x86",
"(",
")",
"or",
"x86",
"else",
"'Wow6432Node'",
"return",
"os",
".",
"path",
".",
"join",
"(",
"'Software'",
",",
"node64",
",",
"'Microsoft'",
",",
"key",
")"
]
| Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
x86: str
Force x86 software registry.
Return
------
str: value | [
"Return",
"key",
"in",
"Microsoft",
"software",
"registry",
"."
]
| python | train | 25.941176 |
fedora-python/pyp2rpm | pyp2rpm/package_getters.py | https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/package_getters.py#L152-L173 | def get(self, wheel=False):
"""Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable.
"""
try:
url = get_url(self.client, self.name, self.version,
wheel, hashed_format=True)[0]
except exceptions.MissingUrlException as e:
raise SystemExit(e)
if wheel:
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
request.urlretrieve(url, save_file)
logger.info('Downloaded package from PyPI: {0}.'.format(save_file))
return save_file | [
"def",
"get",
"(",
"self",
",",
"wheel",
"=",
"False",
")",
":",
"try",
":",
"url",
"=",
"get_url",
"(",
"self",
".",
"client",
",",
"self",
".",
"name",
",",
"self",
".",
"version",
",",
"wheel",
",",
"hashed_format",
"=",
"True",
")",
"[",
"0",
"]",
"except",
"exceptions",
".",
"MissingUrlException",
"as",
"e",
":",
"raise",
"SystemExit",
"(",
"e",
")",
"if",
"wheel",
":",
"self",
".",
"temp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"save_dir",
"=",
"self",
".",
"temp_dir",
"else",
":",
"save_dir",
"=",
"self",
".",
"save_dir",
"save_file",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"save_dir",
",",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"request",
".",
"urlretrieve",
"(",
"url",
",",
"save_file",
")",
"logger",
".",
"info",
"(",
"'Downloaded package from PyPI: {0}.'",
".",
"format",
"(",
"save_file",
")",
")",
"return",
"save_file"
]
| Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable. | [
"Downloads",
"the",
"package",
"from",
"PyPI",
".",
"Returns",
":",
"Full",
"path",
"of",
"the",
"downloaded",
"file",
".",
"Raises",
":",
"PermissionError",
"if",
"the",
"save_dir",
"is",
"not",
"writable",
"."
]
| python | train | 35.681818 |
blubberdiblub/eztemplate | eztemplate/__main__.py | https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L230-L238 | def check_engine(handle):
"""Check availability of requested template engine."""
if handle == 'help':
dump_engines()
sys.exit(0)
if handle not in engines.engines:
print('Engine "%s" is not available.' % (handle,), file=sys.stderr)
sys.exit(1) | [
"def",
"check_engine",
"(",
"handle",
")",
":",
"if",
"handle",
"==",
"'help'",
":",
"dump_engines",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"handle",
"not",
"in",
"engines",
".",
"engines",
":",
"print",
"(",
"'Engine \"%s\" is not available.'",
"%",
"(",
"handle",
",",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
]
| Check availability of requested template engine. | [
"Check",
"availability",
"of",
"requested",
"template",
"engine",
"."
]
| python | train | 31 |
googleapis/google-cloud-python | bigquery/samples/get_model.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/samples/get_model.py#L16-L34 | def get_model(client, model_id):
"""Sample ID: go/samples-tracker/1510"""
# [START bigquery_get_model]
from google.cloud import bigquery
# TODO(developer): Construct a BigQuery client object.
# client = bigquery.Client()
# TODO(developer): Set model_id to the ID of the model to fetch.
# model_id = 'your-project.your_dataset.your_model'
model = client.get_model(model_id)
full_model_id = "{}.{}.{}".format(model.project, model.dataset_id, model.model_id)
friendly_name = model.friendly_name
print(
"Got model '{}' with friendly_name '{}'.".format(full_model_id, friendly_name)
) | [
"def",
"get_model",
"(",
"client",
",",
"model_id",
")",
":",
"# [START bigquery_get_model]",
"from",
"google",
".",
"cloud",
"import",
"bigquery",
"# TODO(developer): Construct a BigQuery client object.",
"# client = bigquery.Client()",
"# TODO(developer): Set model_id to the ID of the model to fetch.",
"# model_id = 'your-project.your_dataset.your_model'",
"model",
"=",
"client",
".",
"get_model",
"(",
"model_id",
")",
"full_model_id",
"=",
"\"{}.{}.{}\"",
".",
"format",
"(",
"model",
".",
"project",
",",
"model",
".",
"dataset_id",
",",
"model",
".",
"model_id",
")",
"friendly_name",
"=",
"model",
".",
"friendly_name",
"print",
"(",
"\"Got model '{}' with friendly_name '{}'.\"",
".",
"format",
"(",
"full_model_id",
",",
"friendly_name",
")",
")"
]
| Sample ID: go/samples-tracker/1510 | [
"Sample",
"ID",
":",
"go",
"/",
"samples",
"-",
"tracker",
"/",
"1510"
]
| python | train | 32.736842 |
thespacedoctor/sloancone | build/lib/sloancone/image.py | https://github.com/thespacedoctor/sloancone/blob/106ea6533ad57f5f0ca82bf6db3053132bdb42e1/build/lib/sloancone/image.py#L136-L194 | def _download_sdss_image(
self):
"""*download sdss image*
"""
self.log.info('starting the ``_download_sdss_image`` method')
opt = ""
if self.grid:
opt += "G"
if self.label:
opt += "L"
if self.photocat:
opt += "P"
if self.speccat:
opt += "S"
if self.invertColors:
opt += "I"
if len(opt):
opt = "opt=%(opt)s&" % locals()
width = self.pixelWidth
scale = (self.arcminWidth * 60.) / width
converter = unit_conversion(
log=self.log
)
ra = converter.ra_sexegesimal_to_decimal(
ra=self.ra
)
dec = converter.dec_sexegesimal_to_decimal(
dec=self.dec
)
url = """http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&%(opt)sPhotoObjs=on&width=%(width)s&height=%(width)s""" % locals(
)
from fundamentals.download import multiobject_download
localUrls = multiobject_download(
urlList=[url],
downloadDirectory=self.downloadDirectory,
log=self.log,
timeStamp=False,
timeout=180,
concurrentDownloads=10,
resetFilename=[self.filename],
credentials=False, # { 'username' : "...", "password", "..." }
longTime=True,
indexFilenames=False
)
print url
self.log.info('completed the ``_download_sdss_image`` method')
return None | [
"def",
"_download_sdss_image",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``_download_sdss_image`` method'",
")",
"opt",
"=",
"\"\"",
"if",
"self",
".",
"grid",
":",
"opt",
"+=",
"\"G\"",
"if",
"self",
".",
"label",
":",
"opt",
"+=",
"\"L\"",
"if",
"self",
".",
"photocat",
":",
"opt",
"+=",
"\"P\"",
"if",
"self",
".",
"speccat",
":",
"opt",
"+=",
"\"S\"",
"if",
"self",
".",
"invertColors",
":",
"opt",
"+=",
"\"I\"",
"if",
"len",
"(",
"opt",
")",
":",
"opt",
"=",
"\"opt=%(opt)s&\"",
"%",
"locals",
"(",
")",
"width",
"=",
"self",
".",
"pixelWidth",
"scale",
"=",
"(",
"self",
".",
"arcminWidth",
"*",
"60.",
")",
"/",
"width",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"ra",
"=",
"converter",
".",
"ra_sexegesimal_to_decimal",
"(",
"ra",
"=",
"self",
".",
"ra",
")",
"dec",
"=",
"converter",
".",
"dec_sexegesimal_to_decimal",
"(",
"dec",
"=",
"self",
".",
"dec",
")",
"url",
"=",
"\"\"\"http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=%(ra)s&dec=%(dec)s&scale=%(scale)s&%(opt)sPhotoObjs=on&width=%(width)s&height=%(width)s\"\"\"",
"%",
"locals",
"(",
")",
"from",
"fundamentals",
".",
"download",
"import",
"multiobject_download",
"localUrls",
"=",
"multiobject_download",
"(",
"urlList",
"=",
"[",
"url",
"]",
",",
"downloadDirectory",
"=",
"self",
".",
"downloadDirectory",
",",
"log",
"=",
"self",
".",
"log",
",",
"timeStamp",
"=",
"False",
",",
"timeout",
"=",
"180",
",",
"concurrentDownloads",
"=",
"10",
",",
"resetFilename",
"=",
"[",
"self",
".",
"filename",
"]",
",",
"credentials",
"=",
"False",
",",
"# { 'username' : \"...\", \"password\", \"...\" }",
"longTime",
"=",
"True",
",",
"indexFilenames",
"=",
"False",
")",
"print",
"url",
"self",
".",
"log",
".",
"info",
"(",
"'completed the ``_download_sdss_image`` method'",
")",
"return",
"None"
]
| *download sdss image* | [
"*",
"download",
"sdss",
"image",
"*"
]
| python | train | 26.237288 |
paylogic/pip-accel | pip_accel/caches/__init__.py | https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/__init__.py#L159-L178 | def get(self, requirement):
"""
Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches.
"""
filename = self.generate_filename(requirement)
for backend in list(self.backends):
try:
pathname = backend.get(filename)
if pathname is not None:
return pathname
except CacheBackendDisabledError as e:
logger.debug("Disabling %s because it requires configuration: %s", backend, e)
self.backends.remove(backend)
except Exception as e:
logger.exception("Disabling %s because it failed: %s", backend, e)
self.backends.remove(backend) | [
"def",
"get",
"(",
"self",
",",
"requirement",
")",
":",
"filename",
"=",
"self",
".",
"generate_filename",
"(",
"requirement",
")",
"for",
"backend",
"in",
"list",
"(",
"self",
".",
"backends",
")",
":",
"try",
":",
"pathname",
"=",
"backend",
".",
"get",
"(",
"filename",
")",
"if",
"pathname",
"is",
"not",
"None",
":",
"return",
"pathname",
"except",
"CacheBackendDisabledError",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Disabling %s because it requires configuration: %s\"",
",",
"backend",
",",
"e",
")",
"self",
".",
"backends",
".",
"remove",
"(",
"backend",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"\"Disabling %s because it failed: %s\"",
",",
"backend",
",",
"e",
")",
"self",
".",
"backends",
".",
"remove",
"(",
"backend",
")"
]
| Get a distribution archive from any of the available caches.
:param requirement: A :class:`.Requirement` object.
:returns: The absolute pathname of a local file or :data:`None` when the
distribution archive is missing from all available caches. | [
"Get",
"a",
"distribution",
"archive",
"from",
"any",
"of",
"the",
"available",
"caches",
"."
]
| python | train | 45.9 |
farzadghanei/distutilazy | distutilazy/clean.py | https://github.com/farzadghanei/distutilazy/blob/c3c7d062f7cb79abb7677cac57dd752127ff78e7/distutilazy/clean.py#L45-L58 | def _find_files(self):
"""Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths
"""
files = []
for ext in self.extensions:
ext_files = util.find_files(self.root, "*" + ext)
log.debug("found {} '*{}' files in '{}'".format(
len(ext_files), ext, self.root)
)
files.extend(ext_files)
return files | [
"def",
"_find_files",
"(",
"self",
")",
":",
"files",
"=",
"[",
"]",
"for",
"ext",
"in",
"self",
".",
"extensions",
":",
"ext_files",
"=",
"util",
".",
"find_files",
"(",
"self",
".",
"root",
",",
"\"*\"",
"+",
"ext",
")",
"log",
".",
"debug",
"(",
"\"found {} '*{}' files in '{}'\"",
".",
"format",
"(",
"len",
"(",
"ext_files",
")",
",",
"ext",
",",
"self",
".",
"root",
")",
")",
"files",
".",
"extend",
"(",
"ext_files",
")",
"return",
"files"
]
| Find files recursively in the root path
using provided extensions.
:return: list of absolute file paths | [
"Find",
"files",
"recursively",
"in",
"the",
"root",
"path",
"using",
"provided",
"extensions",
"."
]
| python | train | 32.142857 |
OpenKMIP/PyKMIP | kmip/core/enums.py | https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/enums.py#L1835-L1848 | def get_enumerations_from_bit_mask(enumeration, mask):
"""
A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask.
"""
return [x for x in enumeration if (x.value & mask) == x.value] | [
"def",
"get_enumerations_from_bit_mask",
"(",
"enumeration",
",",
"mask",
")",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"enumeration",
"if",
"(",
"x",
".",
"value",
"&",
"mask",
")",
"==",
"x",
".",
"value",
"]"
]
| A utility function that creates a list of enumeration values from a bit
mask for a specific mask enumeration class.
Args:
enumeration (class): The enumeration class from which to draw
enumeration values.
mask (int): The bit mask from which to identify enumeration values.
Returns:
list: A list of enumeration values corresponding to the bit mask. | [
"A",
"utility",
"function",
"that",
"creates",
"a",
"list",
"of",
"enumeration",
"values",
"from",
"a",
"bit",
"mask",
"for",
"a",
"specific",
"mask",
"enumeration",
"class",
"."
]
| python | test | 37.5 |
squidsoup/muddle.py | muddle/api.py | https://github.com/squidsoup/muddle.py/blob/f58c62e7d92b9ac24a16de007c0fbd6607b15687/muddle/api.py#L46-L120 | def create(self, fullname, shortname, category_id, **kwargs):
"""
Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course start
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Default to True. Are activity report shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20)
"""
allowed_options = ['idnumber', 'summaryformat',
'format', 'showgrades',
'newsitems', 'startdate',
'maxbytes', 'showreports',
'visible', 'groupmode',
'groupmodeforce', 'jdefaultgroupingid',
'enablecompletion', 'completionstartonenrol',
'completionnotify', 'lang',
'forcetheme']
if valid_options(kwargs, allowed_options):
option_params = {}
for index, key in enumerate(kwargs):
val = kwargs.get(key)
if isinstance(val, bool):
val = int(val)
option_params.update({'courses[0][' + key + ']': val})
params = {'wsfunction': 'core_course_create_courses',
'courses[0][fullname]': fullname,
'courses[0][shortname]': shortname,
'courses[0][categoryid]': category_id}
params.update(option_params)
params.update(self.request_params)
return requests.post(self.api_url, params=params, verify=False) | [
"def",
"create",
"(",
"self",
",",
"fullname",
",",
"shortname",
",",
"category_id",
",",
"*",
"*",
"kwargs",
")",
":",
"allowed_options",
"=",
"[",
"'idnumber'",
",",
"'summaryformat'",
",",
"'format'",
",",
"'showgrades'",
",",
"'newsitems'",
",",
"'startdate'",
",",
"'maxbytes'",
",",
"'showreports'",
",",
"'visible'",
",",
"'groupmode'",
",",
"'groupmodeforce'",
",",
"'jdefaultgroupingid'",
",",
"'enablecompletion'",
",",
"'completionstartonenrol'",
",",
"'completionnotify'",
",",
"'lang'",
",",
"'forcetheme'",
"]",
"if",
"valid_options",
"(",
"kwargs",
",",
"allowed_options",
")",
":",
"option_params",
"=",
"{",
"}",
"for",
"index",
",",
"key",
"in",
"enumerate",
"(",
"kwargs",
")",
":",
"val",
"=",
"kwargs",
".",
"get",
"(",
"key",
")",
"if",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"option_params",
".",
"update",
"(",
"{",
"'courses[0]['",
"+",
"key",
"+",
"']'",
":",
"val",
"}",
")",
"params",
"=",
"{",
"'wsfunction'",
":",
"'core_course_create_courses'",
",",
"'courses[0][fullname]'",
":",
"fullname",
",",
"'courses[0][shortname]'",
":",
"shortname",
",",
"'courses[0][categoryid]'",
":",
"category_id",
"}",
"params",
".",
"update",
"(",
"option_params",
")",
"params",
".",
"update",
"(",
"self",
".",
"request_params",
")",
"return",
"requests",
".",
"post",
"(",
"self",
".",
"api_url",
",",
"params",
"=",
"params",
",",
"verify",
"=",
"False",
")"
]
| Create a new course
:param string fullname: The course's fullname
:param string shortname: The course's shortname
:param int category_id: The course's category
:keyword string idnumber: (optional) Course ID number. \
Yes, it's a string, blame Moodle.
:keyword int summaryformat: (optional) Defaults to 1 (HTML). \
Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
or 4 = Markdown)
:keyword string format: (optional) Defaults to "topics"
Topic options: (weeks, topics, social, site)
:keyword bool showgrades: (optional) Defaults to True. \
Determines if grades are shown
:keyword int newsitems: (optional) Defaults to 5. \
Number of recent items appearing on the course page
:keyword bool startdate: (optional) Timestamp when the course start
:keyword int maxbytes: (optional) Defaults to 83886080. \
Largest size of file that can be uploaded into the course
:keyword bool showreports: Default to True. Are activity report shown?
:keyword bool visible: (optional) Determines if course is \
visible to students
:keyword int groupmode: (optional) Defaults to 2.
options: (0 = no group, 1 = separate, 2 = visible)
:keyword bool groupmodeforce: (optional) Defaults to False. \
Force group mode
:keyword int defaultgroupingid: (optional) Defaults to 0. \
Default grouping id
:keyword bool enablecompletion: (optional) Enable control via \
completion in activity settings.
:keyword bool completionstartonenrol: (optional) \
Begin tracking a student's progress in course completion after
:keyword bool completionnotify: (optional) Default? Dunno. \
Presumably notifies course completion
:keyword string lang: (optional) Force course language.
:keyword string forcetheme: (optional) Name of the force theme
Example Usage::
>>> import muddle
>>> muddle.course().create('a new course', 'new-course', 20) | [
"Create",
"a",
"new",
"course"
]
| python | train | 45.44 |
inspirehep/harvesting-kit | harvestingkit/oup_package.py | https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/oup_package.py#L197-L225 | def _extract_packages(self):
"""
Extract a package in a new directory.
"""
if not hasattr(self, "retrieved_packages_unpacked"):
self.retrieved_packages_unpacked = [self.package_name]
for path in self.retrieved_packages_unpacked:
package_name = basename(path)
self.path_unpacked = join(CFG_UNPACKED_FILES,
package_name.split('.')[0])
self.logger.debug("Extracting package: %s"
% (path.split("/")[-1],))
try:
if "_archival_pdf" in self.path_unpacked:
self.path_unpacked = (self.path_unpacked
.rstrip("_archival_pdf"))
ZipFile(path).extractall(join(self.path_unpacked,
"archival_pdfs"))
else:
ZipFile(path).extractall(self.path_unpacked)
#TarFile.open(path).extractall(self.path_unpacked)
except Exception:
register_exception(alert_admin=True,
prefix="OUP error extracting package.")
self.logger.error("Error extraction package file: %s"
% (path,))
if hasattr(self, "path_unpacked"):
return self.path_unpacked | [
"def",
"_extract_packages",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"retrieved_packages_unpacked\"",
")",
":",
"self",
".",
"retrieved_packages_unpacked",
"=",
"[",
"self",
".",
"package_name",
"]",
"for",
"path",
"in",
"self",
".",
"retrieved_packages_unpacked",
":",
"package_name",
"=",
"basename",
"(",
"path",
")",
"self",
".",
"path_unpacked",
"=",
"join",
"(",
"CFG_UNPACKED_FILES",
",",
"package_name",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Extracting package: %s\"",
"%",
"(",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
",",
")",
")",
"try",
":",
"if",
"\"_archival_pdf\"",
"in",
"self",
".",
"path_unpacked",
":",
"self",
".",
"path_unpacked",
"=",
"(",
"self",
".",
"path_unpacked",
".",
"rstrip",
"(",
"\"_archival_pdf\"",
")",
")",
"ZipFile",
"(",
"path",
")",
".",
"extractall",
"(",
"join",
"(",
"self",
".",
"path_unpacked",
",",
"\"archival_pdfs\"",
")",
")",
"else",
":",
"ZipFile",
"(",
"path",
")",
".",
"extractall",
"(",
"self",
".",
"path_unpacked",
")",
"#TarFile.open(path).extractall(self.path_unpacked)",
"except",
"Exception",
":",
"register_exception",
"(",
"alert_admin",
"=",
"True",
",",
"prefix",
"=",
"\"OUP error extracting package.\"",
")",
"self",
".",
"logger",
".",
"error",
"(",
"\"Error extraction package file: %s\"",
"%",
"(",
"path",
",",
")",
")",
"if",
"hasattr",
"(",
"self",
",",
"\"path_unpacked\"",
")",
":",
"return",
"self",
".",
"path_unpacked"
]
| Extract a package in a new directory. | [
"Extract",
"a",
"package",
"in",
"a",
"new",
"directory",
"."
]
| python | valid | 47.586207 |
nimeshkverma/GitArt | GitArt/Heart.py | https://github.com/nimeshkverma/GitArt/blob/a9dadec08542218aac6b193d6ca73f98127c7096/GitArt/Heart.py#L84-L106 | def do_commits(self):
"""
Perform len(MARKED_DAYS)*self.max_commits and Push to the Repository
"""
git_clone_command = "git clone " + str(self.git_repo_url)
subprocess.call(git_clone_command, shell=True)
subprocess.check_call(
['touch', 'gitHeart.txt'], cwd=self.repository_name)
self.append_onto_file(self.repository_name+"/gitHeart.txt", HEADER)
subprocess.check_call(
['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
subprocess.check_call(
['git', 'commit', '-m', '"Commit Number 0"'], cwd=self.repository_name)
for commit_number in range(1, len(MARKED_DAYS)*self.max_commits+1):
heart_msg = HEART.format(commit_number=str(commit_number))
self.append_onto_file(
self.repository_name+"/gitHeart.txt", heart_msg)
subprocess.check_call(
['git', 'add', 'gitHeart.txt'], cwd=self.repository_name)
subprocess.check_call(['git', 'commit', '-m', '"Commit Number {commit_number}"'.format(
commit_number=commit_number)], cwd=self.repository_name)
subprocess.check_call(
['git', 'push', 'origin', 'master'], cwd=self.repository_name) | [
"def",
"do_commits",
"(",
"self",
")",
":",
"git_clone_command",
"=",
"\"git clone \"",
"+",
"str",
"(",
"self",
".",
"git_repo_url",
")",
"subprocess",
".",
"call",
"(",
"git_clone_command",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'touch'",
",",
"'gitHeart.txt'",
"]",
",",
"cwd",
"=",
"self",
".",
"repository_name",
")",
"self",
".",
"append_onto_file",
"(",
"self",
".",
"repository_name",
"+",
"\"/gitHeart.txt\"",
",",
"HEADER",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'gitHeart.txt'",
"]",
",",
"cwd",
"=",
"self",
".",
"repository_name",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"'\"Commit Number 0\"'",
"]",
",",
"cwd",
"=",
"self",
".",
"repository_name",
")",
"for",
"commit_number",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"MARKED_DAYS",
")",
"*",
"self",
".",
"max_commits",
"+",
"1",
")",
":",
"heart_msg",
"=",
"HEART",
".",
"format",
"(",
"commit_number",
"=",
"str",
"(",
"commit_number",
")",
")",
"self",
".",
"append_onto_file",
"(",
"self",
".",
"repository_name",
"+",
"\"/gitHeart.txt\"",
",",
"heart_msg",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'add'",
",",
"'gitHeart.txt'",
"]",
",",
"cwd",
"=",
"self",
".",
"repository_name",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"'\"Commit Number {commit_number}\"'",
".",
"format",
"(",
"commit_number",
"=",
"commit_number",
")",
"]",
",",
"cwd",
"=",
"self",
".",
"repository_name",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'git'",
",",
"'push'",
",",
"'origin'",
",",
"'master'",
"]",
",",
"cwd",
"=",
"self",
".",
"repository_name",
")"
]
| Perform len(MARKED_DAYS)*self.max_commits and Push to the Repository | [
"Perform",
"len",
"(",
"MARKED_DAYS",
")",
"*",
"self",
".",
"max_commits",
"and",
"Push",
"to",
"the",
"Repository"
]
| python | train | 54.26087 |
flo-compbio/genometools | genometools/ensembl/annotations.py | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/annotations.py#L425-L448 | def get_protein_coding_genes(
path_or_buffer,
include_polymorphic_pseudogenes=True,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['protein_coding'])
if include_polymorphic_pseudogenes:
valid_biotypes.add('polymorphic_pseudogene')
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | [
"def",
"get_protein_coding_genes",
"(",
"path_or_buffer",
",",
"include_polymorphic_pseudogenes",
"=",
"True",
",",
"remove_duplicates",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"valid_biotypes",
"=",
"set",
"(",
"[",
"'protein_coding'",
"]",
")",
"if",
"include_polymorphic_pseudogenes",
":",
"valid_biotypes",
".",
"add",
"(",
"'polymorphic_pseudogene'",
")",
"df",
"=",
"get_genes",
"(",
"path_or_buffer",
",",
"valid_biotypes",
",",
"remove_duplicates",
"=",
"remove_duplicates",
",",
"*",
"*",
"kwargs",
")",
"return",
"df"
]
| r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes. | [
"r",
"Get",
"list",
"of",
"all",
"protein",
"-",
"coding",
"genes",
"based",
"on",
"Ensembl",
"GTF",
"file",
".",
"Parameters",
"----------",
"See",
":",
"func",
":",
"get_genes",
"function",
"."
]
| python | train | 27.708333 |
cloudera/cm_api | python/src/cm_api/endpoints/host_templates.py | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/host_templates.py#L53-L62 | def get_all_host_templates(resource_root, cluster_name="default"):
"""
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, api_version=3) | [
"def",
"get_all_host_templates",
"(",
"resource_root",
",",
"cluster_name",
"=",
"\"default\"",
")",
":",
"return",
"call",
"(",
"resource_root",
".",
"get",
",",
"HOST_TEMPLATES_PATH",
"%",
"(",
"cluster_name",
",",
")",
",",
"ApiHostTemplate",
",",
"True",
",",
"api_version",
"=",
"3",
")"
]
| Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3 | [
"Get",
"all",
"host",
"templates",
"in",
"a",
"cluster",
"."
]
| python | train | 36.7 |
mitsei/dlkit | dlkit/records/assessment/orthographic_visualization/orthographic_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/orthographic_visualization/orthographic_records.py#L566-L582 | def _init_metadata(self):
"""stub"""
super(EulerRotationAnswerFormRecord, self)._init_metadata()
self._euler_rotation_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'angle_values'),
'element_label': 'Euler Angle Values',
'instructions': 'Provide X, Y, and Z euler angle rotation values',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
} | [
"def",
"_init_metadata",
"(",
"self",
")",
":",
"super",
"(",
"EulerRotationAnswerFormRecord",
",",
"self",
")",
".",
"_init_metadata",
"(",
")",
"self",
".",
"_euler_rotation_metadata",
"=",
"{",
"'element_id'",
":",
"Id",
"(",
"self",
".",
"my_osid_object_form",
".",
"_authority",
",",
"self",
".",
"my_osid_object_form",
".",
"_namespace",
",",
"'angle_values'",
")",
",",
"'element_label'",
":",
"'Euler Angle Values'",
",",
"'instructions'",
":",
"'Provide X, Y, and Z euler angle rotation values'",
",",
"'required'",
":",
"True",
",",
"'read_only'",
":",
"False",
",",
"'linked'",
":",
"True",
",",
"'array'",
":",
"False",
",",
"'default_object_values'",
":",
"[",
"{",
"}",
"]",
",",
"'syntax'",
":",
"'OBJECT'",
",",
"'object_set'",
":",
"[",
"]",
"}"
]
| stub | [
"stub"
]
| python | train | 39.882353 |
proycon/pynlpl | pynlpl/formats/folia.py | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4337-L4371 | def append(self, child, *args, **kwargs):
"""See :meth:`AbstractElement.append`"""
#Accept Word instances instead of WordReference, references will be automagically used upon serialisation
if isinstance(child, (Word, Morpheme, Phoneme)) and WordReference in self.ACCEPTED_DATA:
#We don't really append but do an insertion so all references are in proper order
insertionpoint = len(self.data)
for i, sibling in enumerate(self.data):
if isinstance(sibling, (Word, Morpheme, Phoneme)):
try:
if not sibling.precedes(child):
insertionpoint = i
except: #happens if we can't determine common ancestors
pass
self.data.insert(insertionpoint, child)
return child
elif isinstance(child, AbstractSpanAnnotation): #(covers span roles just as well)
insertionpoint = len(self.data)
try:
firstword = child.wrefs(0)
except IndexError:
#we have no basis to determine an insertionpoint for this child, just append it then
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs)
insertionpoint = len(self.data)
for i, sibling in enumerate(self.data):
if isinstance(sibling, (Word, Morpheme, Phoneme)):
try:
if not sibling.precedes(firstword):
insertionpoint = i
except: #happens if we can't determine common ancestors
pass
return super(AbstractSpanAnnotation,self).insert(insertionpoint, child, *args, **kwargs)
else:
return super(AbstractSpanAnnotation,self).append(child, *args, **kwargs) | [
"def",
"append",
"(",
"self",
",",
"child",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#Accept Word instances instead of WordReference, references will be automagically used upon serialisation",
"if",
"isinstance",
"(",
"child",
",",
"(",
"Word",
",",
"Morpheme",
",",
"Phoneme",
")",
")",
"and",
"WordReference",
"in",
"self",
".",
"ACCEPTED_DATA",
":",
"#We don't really append but do an insertion so all references are in proper order",
"insertionpoint",
"=",
"len",
"(",
"self",
".",
"data",
")",
"for",
"i",
",",
"sibling",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"if",
"isinstance",
"(",
"sibling",
",",
"(",
"Word",
",",
"Morpheme",
",",
"Phoneme",
")",
")",
":",
"try",
":",
"if",
"not",
"sibling",
".",
"precedes",
"(",
"child",
")",
":",
"insertionpoint",
"=",
"i",
"except",
":",
"#happens if we can't determine common ancestors",
"pass",
"self",
".",
"data",
".",
"insert",
"(",
"insertionpoint",
",",
"child",
")",
"return",
"child",
"elif",
"isinstance",
"(",
"child",
",",
"AbstractSpanAnnotation",
")",
":",
"#(covers span roles just as well)",
"insertionpoint",
"=",
"len",
"(",
"self",
".",
"data",
")",
"try",
":",
"firstword",
"=",
"child",
".",
"wrefs",
"(",
"0",
")",
"except",
"IndexError",
":",
"#we have no basis to determine an insertionpoint for this child, just append it then",
"return",
"super",
"(",
"AbstractSpanAnnotation",
",",
"self",
")",
".",
"append",
"(",
"child",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"insertionpoint",
"=",
"len",
"(",
"self",
".",
"data",
")",
"for",
"i",
",",
"sibling",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"if",
"isinstance",
"(",
"sibling",
",",
"(",
"Word",
",",
"Morpheme",
",",
"Phoneme",
")",
")",
":",
"try",
":",
"if",
"not",
"sibling",
".",
"precedes",
"(",
"firstword",
")",
":",
"insertionpoint",
"=",
"i",
"except",
":",
"#happens if we can't determine common ancestors",
"pass",
"return",
"super",
"(",
"AbstractSpanAnnotation",
",",
"self",
")",
".",
"insert",
"(",
"insertionpoint",
",",
"child",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"super",
"(",
"AbstractSpanAnnotation",
",",
"self",
")",
".",
"append",
"(",
"child",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| See :meth:`AbstractElement.append` | [
"See",
":",
"meth",
":",
"AbstractElement",
".",
"append"
]
| python | train | 52.885714 |
pandas-dev/pandas | pandas/core/generic.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5886-L5930 | def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self) | [
"def",
"convert_objects",
"(",
"self",
",",
"convert_dates",
"=",
"True",
",",
"convert_numeric",
"=",
"False",
",",
"convert_timedeltas",
"=",
"True",
",",
"copy",
"=",
"True",
")",
":",
"msg",
"=",
"(",
"\"convert_objects is deprecated. To re-infer data dtypes for \"",
"\"object columns, use {klass}.infer_objects()\\nFor all \"",
"\"other conversions use the data-type specific converters \"",
"\"pd.to_datetime, pd.to_timedelta and pd.to_numeric.\"",
")",
".",
"format",
"(",
"klass",
"=",
"self",
".",
"__class__",
".",
"__name__",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"_data",
".",
"convert",
"(",
"convert_dates",
"=",
"convert_dates",
",",
"convert_numeric",
"=",
"convert_numeric",
",",
"convert_timedeltas",
"=",
"convert_timedeltas",
",",
"copy",
"=",
"copy",
")",
")",
".",
"__finalize__",
"(",
"self",
")"
]
| Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type. | [
"Attempt",
"to",
"infer",
"better",
"dtype",
"for",
"object",
"columns",
"."
]
| python | train | 44.044444 |
ray-project/ray | python/ray/experimental/sgd/tfbench/convnet_builder.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/sgd/tfbench/convnet_builder.py#L143-L243 | def conv(self,
num_out_channels,
k_height,
k_width,
d_height=1,
d_width=1,
mode="SAME",
input_layer=None,
num_channels_in=None,
use_batch_norm=None,
stddev=None,
activation="relu",
bias=0.0):
"""Construct a conv2d layer on top of cnn."""
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
kernel_initializer = None
if stddev is not None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
name = "conv" + str(self.counts["conv"])
self.counts["conv"] += 1
with tf.variable_scope(name):
strides = [1, d_height, d_width, 1]
if self.data_format == "NCHW":
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != "SAME_RESNET":
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding=mode,
kernel_initializer=kernel_initializer)
else: # Special padding mode for ResNet models
if d_height == 1 and d_width == 1:
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="SAME",
kernel_initializer=kernel_initializer)
else:
rate = 1 # Unused (for 'a trous' convolutions)
kernel_height_effective = k_height + (k_height - 1) * (
rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (
rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end],
[pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == "NCHW":
padding = [
padding[0], padding[3], padding[1], padding[2]
]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(
input_layer,
num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width],
padding="VALID",
kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable(
"biases", [num_out_channels],
self.variable_dtype,
self.dtype,
initializer=tf.constant_initializer(bias))
biased = tf.reshape(
tf.nn.bias_add(
conv, biases, data_format=self.data_format),
conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_out_channels
biased = self.batch_norm(**self.batch_norm_config)
if activation == "relu":
conv1 = tf.nn.relu(biased)
elif activation == "linear" or activation is None:
conv1 = biased
elif activation == "tanh":
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type \"%s\"" % activation)
self.top_layer = conv1
self.top_size = num_out_channels
return conv1 | [
"def",
"conv",
"(",
"self",
",",
"num_out_channels",
",",
"k_height",
",",
"k_width",
",",
"d_height",
"=",
"1",
",",
"d_width",
"=",
"1",
",",
"mode",
"=",
"\"SAME\"",
",",
"input_layer",
"=",
"None",
",",
"num_channels_in",
"=",
"None",
",",
"use_batch_norm",
"=",
"None",
",",
"stddev",
"=",
"None",
",",
"activation",
"=",
"\"relu\"",
",",
"bias",
"=",
"0.0",
")",
":",
"if",
"input_layer",
"is",
"None",
":",
"input_layer",
"=",
"self",
".",
"top_layer",
"if",
"num_channels_in",
"is",
"None",
":",
"num_channels_in",
"=",
"self",
".",
"top_size",
"kernel_initializer",
"=",
"None",
"if",
"stddev",
"is",
"not",
"None",
":",
"kernel_initializer",
"=",
"tf",
".",
"truncated_normal_initializer",
"(",
"stddev",
"=",
"stddev",
")",
"name",
"=",
"\"conv\"",
"+",
"str",
"(",
"self",
".",
"counts",
"[",
"\"conv\"",
"]",
")",
"self",
".",
"counts",
"[",
"\"conv\"",
"]",
"+=",
"1",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
")",
":",
"strides",
"=",
"[",
"1",
",",
"d_height",
",",
"d_width",
",",
"1",
"]",
"if",
"self",
".",
"data_format",
"==",
"\"NCHW\"",
":",
"strides",
"=",
"[",
"strides",
"[",
"0",
"]",
",",
"strides",
"[",
"3",
"]",
",",
"strides",
"[",
"1",
"]",
",",
"strides",
"[",
"2",
"]",
"]",
"if",
"mode",
"!=",
"\"SAME_RESNET\"",
":",
"conv",
"=",
"self",
".",
"_conv2d_impl",
"(",
"input_layer",
",",
"num_channels_in",
",",
"num_out_channels",
",",
"kernel_size",
"=",
"[",
"k_height",
",",
"k_width",
"]",
",",
"strides",
"=",
"[",
"d_height",
",",
"d_width",
"]",
",",
"padding",
"=",
"mode",
",",
"kernel_initializer",
"=",
"kernel_initializer",
")",
"else",
":",
"# Special padding mode for ResNet models",
"if",
"d_height",
"==",
"1",
"and",
"d_width",
"==",
"1",
":",
"conv",
"=",
"self",
".",
"_conv2d_impl",
"(",
"input_layer",
",",
"num_channels_in",
",",
"num_out_channels",
",",
"kernel_size",
"=",
"[",
"k_height",
",",
"k_width",
"]",
",",
"strides",
"=",
"[",
"d_height",
",",
"d_width",
"]",
",",
"padding",
"=",
"\"SAME\"",
",",
"kernel_initializer",
"=",
"kernel_initializer",
")",
"else",
":",
"rate",
"=",
"1",
"# Unused (for 'a trous' convolutions)",
"kernel_height_effective",
"=",
"k_height",
"+",
"(",
"k_height",
"-",
"1",
")",
"*",
"(",
"rate",
"-",
"1",
")",
"pad_h_beg",
"=",
"(",
"kernel_height_effective",
"-",
"1",
")",
"//",
"2",
"pad_h_end",
"=",
"kernel_height_effective",
"-",
"1",
"-",
"pad_h_beg",
"kernel_width_effective",
"=",
"k_width",
"+",
"(",
"k_width",
"-",
"1",
")",
"*",
"(",
"rate",
"-",
"1",
")",
"pad_w_beg",
"=",
"(",
"kernel_width_effective",
"-",
"1",
")",
"//",
"2",
"pad_w_end",
"=",
"kernel_width_effective",
"-",
"1",
"-",
"pad_w_beg",
"padding",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"pad_h_beg",
",",
"pad_h_end",
"]",
",",
"[",
"pad_w_beg",
",",
"pad_w_end",
"]",
",",
"[",
"0",
",",
"0",
"]",
"]",
"if",
"self",
".",
"data_format",
"==",
"\"NCHW\"",
":",
"padding",
"=",
"[",
"padding",
"[",
"0",
"]",
",",
"padding",
"[",
"3",
"]",
",",
"padding",
"[",
"1",
"]",
",",
"padding",
"[",
"2",
"]",
"]",
"input_layer",
"=",
"tf",
".",
"pad",
"(",
"input_layer",
",",
"padding",
")",
"conv",
"=",
"self",
".",
"_conv2d_impl",
"(",
"input_layer",
",",
"num_channels_in",
",",
"num_out_channels",
",",
"kernel_size",
"=",
"[",
"k_height",
",",
"k_width",
"]",
",",
"strides",
"=",
"[",
"d_height",
",",
"d_width",
"]",
",",
"padding",
"=",
"\"VALID\"",
",",
"kernel_initializer",
"=",
"kernel_initializer",
")",
"if",
"use_batch_norm",
"is",
"None",
":",
"use_batch_norm",
"=",
"self",
".",
"use_batch_norm",
"if",
"not",
"use_batch_norm",
":",
"if",
"bias",
"is",
"not",
"None",
":",
"biases",
"=",
"self",
".",
"get_variable",
"(",
"\"biases\"",
",",
"[",
"num_out_channels",
"]",
",",
"self",
".",
"variable_dtype",
",",
"self",
".",
"dtype",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"bias",
")",
")",
"biased",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"nn",
".",
"bias_add",
"(",
"conv",
",",
"biases",
",",
"data_format",
"=",
"self",
".",
"data_format",
")",
",",
"conv",
".",
"get_shape",
"(",
")",
")",
"else",
":",
"biased",
"=",
"conv",
"else",
":",
"self",
".",
"top_layer",
"=",
"conv",
"self",
".",
"top_size",
"=",
"num_out_channels",
"biased",
"=",
"self",
".",
"batch_norm",
"(",
"*",
"*",
"self",
".",
"batch_norm_config",
")",
"if",
"activation",
"==",
"\"relu\"",
":",
"conv1",
"=",
"tf",
".",
"nn",
".",
"relu",
"(",
"biased",
")",
"elif",
"activation",
"==",
"\"linear\"",
"or",
"activation",
"is",
"None",
":",
"conv1",
"=",
"biased",
"elif",
"activation",
"==",
"\"tanh\"",
":",
"conv1",
"=",
"tf",
".",
"nn",
".",
"tanh",
"(",
"biased",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"Invalid activation type \\\"%s\\\"\"",
"%",
"activation",
")",
"self",
".",
"top_layer",
"=",
"conv1",
"self",
".",
"top_size",
"=",
"num_out_channels",
"return",
"conv1"
]
| Construct a conv2d layer on top of cnn. | [
"Construct",
"a",
"conv2d",
"layer",
"on",
"top",
"of",
"cnn",
"."
]
| python | train | 43.49505 |
OpenGov/og-python-utils | ogutils/collections/checks.py | https://github.com/OpenGov/og-python-utils/blob/00f44927383dd1bd6348f47302c4453d56963479/ogutils/collections/checks.py#L13-L21 | def any_shared(enum_one, enum_two):
'''
Truthy if any element in enum_one is present in enum_two
'''
if not is_collection(enum_one) or not is_collection(enum_two):
return False
enum_one = enum_one if isinstance(enum_one, (set, dict)) else set(enum_one)
enum_two = enum_two if isinstance(enum_two, (set, dict)) else set(enum_two)
return any(e in enum_two for e in enum_one) | [
"def",
"any_shared",
"(",
"enum_one",
",",
"enum_two",
")",
":",
"if",
"not",
"is_collection",
"(",
"enum_one",
")",
"or",
"not",
"is_collection",
"(",
"enum_two",
")",
":",
"return",
"False",
"enum_one",
"=",
"enum_one",
"if",
"isinstance",
"(",
"enum_one",
",",
"(",
"set",
",",
"dict",
")",
")",
"else",
"set",
"(",
"enum_one",
")",
"enum_two",
"=",
"enum_two",
"if",
"isinstance",
"(",
"enum_two",
",",
"(",
"set",
",",
"dict",
")",
")",
"else",
"set",
"(",
"enum_two",
")",
"return",
"any",
"(",
"e",
"in",
"enum_two",
"for",
"e",
"in",
"enum_one",
")"
]
| Truthy if any element in enum_one is present in enum_two | [
"Truthy",
"if",
"any",
"element",
"in",
"enum_one",
"is",
"present",
"in",
"enum_two"
]
| python | train | 45.333333 |
icgood/pymap | pymap/parsing/response/code.py | https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/parsing/response/code.py#L29-L35 | def string(self) -> bytes:
"""The capabilities string without the enclosing square brackets."""
if self._raw is not None:
return self._raw
self._raw = raw = BytesFormat(b' ').join(
[b'CAPABILITY', b'IMAP4rev1'] + self.capabilities)
return raw | [
"def",
"string",
"(",
"self",
")",
"->",
"bytes",
":",
"if",
"self",
".",
"_raw",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_raw",
"self",
".",
"_raw",
"=",
"raw",
"=",
"BytesFormat",
"(",
"b' '",
")",
".",
"join",
"(",
"[",
"b'CAPABILITY'",
",",
"b'IMAP4rev1'",
"]",
"+",
"self",
".",
"capabilities",
")",
"return",
"raw"
]
| The capabilities string without the enclosing square brackets. | [
"The",
"capabilities",
"string",
"without",
"the",
"enclosing",
"square",
"brackets",
"."
]
| python | train | 41.714286 |
open-mmlab/mmcv | mmcv/runner/runner.py | https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/runner/runner.py#L194-L215 | def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook) | [
"def",
"register_hook",
"(",
"self",
",",
"hook",
",",
"priority",
"=",
"'NORMAL'",
")",
":",
"assert",
"isinstance",
"(",
"hook",
",",
"Hook",
")",
"if",
"hasattr",
"(",
"hook",
",",
"'priority'",
")",
":",
"raise",
"ValueError",
"(",
"'\"priority\" is a reserved attribute for hooks'",
")",
"priority",
"=",
"get_priority",
"(",
"priority",
")",
"hook",
".",
"priority",
"=",
"priority",
"# insert the hook to a sorted list",
"inserted",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_hooks",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"priority",
">=",
"self",
".",
"_hooks",
"[",
"i",
"]",
".",
"priority",
":",
"self",
".",
"_hooks",
".",
"insert",
"(",
"i",
"+",
"1",
",",
"hook",
")",
"inserted",
"=",
"True",
"break",
"if",
"not",
"inserted",
":",
"self",
".",
"_hooks",
".",
"insert",
"(",
"0",
",",
"hook",
")"
]
| Register a hook into the hook list.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority. | [
"Register",
"a",
"hook",
"into",
"the",
"hook",
"list",
"."
]
| python | test | 38.636364 |
slickqa/python-client | slickqa/micromodels/packages/PySO8601/durations.py | https://github.com/slickqa/python-client/blob/1d36b4977cd4140d7d24917cab2b3f82b60739c2/slickqa/micromodels/packages/PySO8601/durations.py#L58-L83 | def parse_duration(duration):
"""Attepmts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object.
"""
duration = str(duration).upper().strip()
elements = ELEMENTS.copy()
for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
if pattern.match(duration):
found = pattern.match(duration).groupdict()
del found['time']
elements.update(dict((k, int(v or 0))
for k, v
in found.items()))
return datetime.timedelta(days=(elements['days'] +
_months_to_days(elements['months']) +
_years_to_days(elements['years'])),
hours=elements['hours'],
minutes=elements['minutes'],
seconds=elements['seconds'])
return ParseError() | [
"def",
"parse_duration",
"(",
"duration",
")",
":",
"duration",
"=",
"str",
"(",
"duration",
")",
".",
"upper",
"(",
")",
".",
"strip",
"(",
")",
"elements",
"=",
"ELEMENTS",
".",
"copy",
"(",
")",
"for",
"pattern",
"in",
"(",
"SIMPLE_DURATION",
",",
"COMBINED_DURATION",
")",
":",
"if",
"pattern",
".",
"match",
"(",
"duration",
")",
":",
"found",
"=",
"pattern",
".",
"match",
"(",
"duration",
")",
".",
"groupdict",
"(",
")",
"del",
"found",
"[",
"'time'",
"]",
"elements",
".",
"update",
"(",
"dict",
"(",
"(",
"k",
",",
"int",
"(",
"v",
"or",
"0",
")",
")",
"for",
"k",
",",
"v",
"in",
"found",
".",
"items",
"(",
")",
")",
")",
"return",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"(",
"elements",
"[",
"'days'",
"]",
"+",
"_months_to_days",
"(",
"elements",
"[",
"'months'",
"]",
")",
"+",
"_years_to_days",
"(",
"elements",
"[",
"'years'",
"]",
")",
")",
",",
"hours",
"=",
"elements",
"[",
"'hours'",
"]",
",",
"minutes",
"=",
"elements",
"[",
"'minutes'",
"]",
",",
"seconds",
"=",
"elements",
"[",
"'seconds'",
"]",
")",
"return",
"ParseError",
"(",
")"
]
| Attepmts to parse an ISO8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object. | [
"Attepmts",
"to",
"parse",
"an",
"ISO8601",
"formatted",
"duration",
"."
]
| python | train | 37.846154 |
BD2KGenomics/protect | src/protect/mutation_calling/radia.py | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/radia.py#L40-L59 | def run_radia_with_merge(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):
"""
A wrapper for the the entire RADIA sub-graph.
:param dict rna_bam: Dict dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_radia, rna_bam['rna_genome'], tumor_bam,
normal_bam, univ_options, radia_options, disk='100M',
memory='100M').encapsulate()
merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), univ_options, disk='100M', memory='100M')
job.addChild(spawn)
spawn.addChild(merge)
return merge.rv() | [
"def",
"run_radia_with_merge",
"(",
"job",
",",
"rna_bam",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"radia_options",
")",
":",
"spawn",
"=",
"job",
".",
"wrapJobFn",
"(",
"run_radia",
",",
"rna_bam",
"[",
"'rna_genome'",
"]",
",",
"tumor_bam",
",",
"normal_bam",
",",
"univ_options",
",",
"radia_options",
",",
"disk",
"=",
"'100M'",
",",
"memory",
"=",
"'100M'",
")",
".",
"encapsulate",
"(",
")",
"merge",
"=",
"job",
".",
"wrapJobFn",
"(",
"merge_perchrom_vcfs",
",",
"spawn",
".",
"rv",
"(",
")",
",",
"univ_options",
",",
"disk",
"=",
"'100M'",
",",
"memory",
"=",
"'100M'",
")",
"job",
".",
"addChild",
"(",
"spawn",
")",
"spawn",
".",
"addChild",
"(",
"merge",
")",
"return",
"merge",
".",
"rv",
"(",
")"
]
| A wrapper for the the entire RADIA sub-graph.
:param dict rna_bam: Dict dicts of bam and bai for tumor RNA-Seq obtained by running STAR within
ProTECT.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:return: fsID to the merged RADIA calls
:rtype: toil.fileStore.FileID | [
"A",
"wrapper",
"for",
"the",
"the",
"entire",
"RADIA",
"sub",
"-",
"graph",
"."
]
| python | train | 49.35 |
saltstack/salt | salt/modules/file.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L1320-L1354 | def uncomment(path,
regex,
char='#',
backup='.bak'):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
**WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
'''
return comment_line(path=path,
regex=regex,
char=char,
cmnt=False,
backup=backup) | [
"def",
"uncomment",
"(",
"path",
",",
"regex",
",",
"char",
"=",
"'#'",
",",
"backup",
"=",
"'.bak'",
")",
":",
"return",
"comment_line",
"(",
"path",
"=",
"path",
",",
"regex",
"=",
"regex",
",",
"char",
"=",
"char",
",",
"cmnt",
"=",
"False",
",",
"backup",
"=",
"backup",
")"
]
| .. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called, it will
overwrite this backup
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID' | [
"..",
"deprecated",
"::",
"0",
".",
"17",
".",
"0",
"Use",
":",
"py",
":",
"func",
":",
"~salt",
".",
"modules",
".",
"file",
".",
"replace",
"instead",
"."
]
| python | train | 32.2 |
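
The docstring above already shows the CLI form. For completeness, here is a hedged sketch of driving the same execution-module function from Python through Salt's local client (the target glob and file path simply mirror the CLI example, and a running master/minion setup is assumed):

import salt.client

local = salt.client.LocalClient()
# Uncomment the matching line in /etc/hosts.deny on every targeted minion.
local.cmd('*', 'file.uncomment', ['/etc/hosts.deny', 'ALL: PARANOID'])
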
scanny/python-pptx | pptx/shapes/autoshape.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/autoshape.py#L116-L126 | def _initialized_adjustments(self, prstGeom):
"""
Return an initialized list of adjustment values based on the contents
of *prstGeom*
"""
if prstGeom is None:
return []
davs = AutoShapeType.default_adjustment_values(prstGeom.prst)
adjustments = [Adjustment(name, def_val) for name, def_val in davs]
self._update_adjustments_with_actuals(adjustments, prstGeom.gd_lst)
return adjustments | [
"def",
"_initialized_adjustments",
"(",
"self",
",",
"prstGeom",
")",
":",
"if",
"prstGeom",
"is",
"None",
":",
"return",
"[",
"]",
"davs",
"=",
"AutoShapeType",
".",
"default_adjustment_values",
"(",
"prstGeom",
".",
"prst",
")",
"adjustments",
"=",
"[",
"Adjustment",
"(",
"name",
",",
"def_val",
")",
"for",
"name",
",",
"def_val",
"in",
"davs",
"]",
"self",
".",
"_update_adjustments_with_actuals",
"(",
"adjustments",
",",
"prstGeom",
".",
"gd_lst",
")",
"return",
"adjustments"
]
| Return an initialized list of adjustment values based on the contents
of *prstGeom* | [
"Return",
"an",
"initialized",
"list",
"of",
"adjustment",
"values",
"based",
"on",
"the",
"contents",
"of",
"*",
"prstGeom",
"*"
]
| python | train | 41.727273 |
Tinche/cattrs | src/cattr/converters.py | https://github.com/Tinche/cattrs/blob/481bc9bdb69b2190d699b54f331c8c5c075506d5/src/cattr/converters.py#L286-L292 | def _structure_attr_from_tuple(self, a, name, value):
"""Handle an individual attrs attribute."""
type_ = a.type
if type_ is None:
# No type metadata.
return value
return self._structure_func.dispatch(type_)(value, type_) | [
"def",
"_structure_attr_from_tuple",
"(",
"self",
",",
"a",
",",
"name",
",",
"value",
")",
":",
"type_",
"=",
"a",
".",
"type",
"if",
"type_",
"is",
"None",
":",
"# No type metadata.",
"return",
"value",
"return",
"self",
".",
"_structure_func",
".",
"dispatch",
"(",
"type_",
")",
"(",
"value",
",",
"type_",
")"
]
| Handle an individual attrs attribute. | [
"Handle",
"an",
"individual",
"attrs",
"attribute",
"."
]
| python | train | 38.714286 |
Tivix/django-common | django_common/decorators.py | https://github.com/Tivix/django-common/blob/407d208121011a8425139e541629554114d96c18/django_common/decorators.py#L14-L30 | def ssl_required(allow_non_ssl=False):
"""
Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true.
"""
def wrapper(view_func):
def _checkssl(request, *args, **kwargs):
# allow_non_ssl=True lets non-https requests to come
# through to this view (and hence not redirect)
if hasattr(settings, 'SSL_ENABLED') and settings.SSL_ENABLED \
and not request.is_secure() and not allow_non_ssl:
return HttpResponseRedirect(
request.build_absolute_uri().replace('http://', 'https://'))
return view_func(request, *args, **kwargs)
return _checkssl
return wrapper | [
"def",
"ssl_required",
"(",
"allow_non_ssl",
"=",
"False",
")",
":",
"def",
"wrapper",
"(",
"view_func",
")",
":",
"def",
"_checkssl",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# allow_non_ssl=True lets non-https requests to come",
"# through to this view (and hence not redirect)",
"if",
"hasattr",
"(",
"settings",
",",
"'SSL_ENABLED'",
")",
"and",
"settings",
".",
"SSL_ENABLED",
"and",
"not",
"request",
".",
"is_secure",
"(",
")",
"and",
"not",
"allow_non_ssl",
":",
"return",
"HttpResponseRedirect",
"(",
"request",
".",
"build_absolute_uri",
"(",
")",
".",
"replace",
"(",
"'http://'",
",",
"'https://'",
")",
")",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_checkssl",
"return",
"wrapper"
]
| Views decorated with this will always get redirected to https
except when allow_non_ssl is set to true. | [
"Views",
"decorated",
"with",
"this",
"will",
"always",
"get",
"redirected",
"to",
"https",
"except",
"when",
"allow_non_ssl",
"is",
"set",
"to",
"true",
"."
]
| python | train | 42.588235 |
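
A short usage sketch for the decorator above. The view names are invented for illustration, SSL_ENABLED is assumed to be set in the Django settings, and the import path follows the record's module path:

from django.http import HttpResponse
from django_common.decorators import ssl_required

@ssl_required()                       # plain-HTTP requests are redirected to https
def checkout(request):
    return HttpResponse('secure checkout')

@ssl_required(allow_non_ssl=True)     # this view may also be served over plain http
def landing(request):
    return HttpResponse('public landing page')
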
ecordell/pymacaroons | pymacaroons/serializers/json_serializer.py | https://github.com/ecordell/pymacaroons/blob/c941614df15fe732ea432a62788e45410bcb868d/pymacaroons/serializers/json_serializer.py#L128-L140 | def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized | [
"def",
"_caveat_v1_to_dict",
"(",
"c",
")",
":",
"serialized",
"=",
"{",
"}",
"if",
"len",
"(",
"c",
".",
"caveat_id",
")",
">",
"0",
":",
"serialized",
"[",
"'cid'",
"]",
"=",
"c",
".",
"caveat_id",
"if",
"c",
".",
"verification_key_id",
":",
"serialized",
"[",
"'vid'",
"]",
"=",
"utils",
".",
"raw_urlsafe_b64encode",
"(",
"c",
".",
"verification_key_id",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"c",
".",
"location",
":",
"serialized",
"[",
"'cl'",
"]",
"=",
"c",
".",
"location",
"return",
"serialized"
]
| Return a caveat as a dictionary for export as the JSON
macaroon v1 format. | [
"Return",
"a",
"caveat",
"as",
"a",
"dictionary",
"for",
"export",
"as",
"the",
"JSON",
"macaroon",
"v1",
"format",
"."
]
| python | train | 31.923077 |
p3trus/slave | slave/cryomagnetics/mps4g.py | https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/cryomagnetics/mps4g.py#L224-L242 | def sweep(self, mode, speed=None):
"""Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`.
"""
sweep_modes = ['UP', 'DOWN', 'PAUSE', 'ZERO', 'LIMIT']
sweep_speed = ['SLOW', 'FAST', None]
if not mode in sweep_modes:
raise ValueError('Invalid sweep mode.')
if not speed in sweep_speed:
raise ValueError('Invalid sweep speed.')
if speed is None:
self._write('SWEEP {0}'.format(mode))
else:
self._write('SWEEP {0} {1}'.format(mode, speed)) | [
"def",
"sweep",
"(",
"self",
",",
"mode",
",",
"speed",
"=",
"None",
")",
":",
"sweep_modes",
"=",
"[",
"'UP'",
",",
"'DOWN'",
",",
"'PAUSE'",
",",
"'ZERO'",
",",
"'LIMIT'",
"]",
"sweep_speed",
"=",
"[",
"'SLOW'",
",",
"'FAST'",
",",
"None",
"]",
"if",
"not",
"mode",
"in",
"sweep_modes",
":",
"raise",
"ValueError",
"(",
"'Invalid sweep mode.'",
")",
"if",
"not",
"speed",
"in",
"sweep_speed",
":",
"raise",
"ValueError",
"(",
"'Invalid sweep speed.'",
")",
"if",
"speed",
"is",
"None",
":",
"self",
".",
"_write",
"(",
"'SWEEP {0}'",
".",
"format",
"(",
"mode",
")",
")",
"else",
":",
"self",
".",
"_write",
"(",
"'SWEEP {0} {1}'",
".",
"format",
"(",
"mode",
",",
"speed",
")",
")"
]
| Starts the output current sweep.
:param mode: The sweep mode. Valid entries are `'UP'`, `'DOWN'`,
`'PAUSE'`or `'ZERO'`. If in shim mode, `'LIMIT'` is valid as well.
:param speed: The sweeping speed. Valid entries are `'FAST'`, `'SLOW'`
or `None`. | [
"Starts",
"the",
"output",
"current",
"sweep",
"."
]
| python | train | 40.263158 |
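
Assuming mps is an already-connected instance of the power-supply driver class from slave/cryomagnetics/mps4g.py (connection details omitted), the sweep modes above would be invoked like this:

mps.sweep('UP', 'SLOW')    # ramp the output current up at the slow rate
mps.sweep('PAUSE')         # the speed argument may be omitted (defaults to None)
mps.sweep('ZERO', 'FAST')  # drive the current back to zero quickly
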
softlayer/softlayer-python | SoftLayer/CLI/loadbal/group_edit.py | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/loadbal/group_edit.py#L25-L43 | def cli(env, identifier, allocation, port, routing_type, routing_method):
"""Edit an existing load balancer service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if any input is provided
if not any([allocation, port, routing_type, routing_method]):
raise exceptions.CLIAbort(
'At least one property is required to be changed!')
mgr.edit_service_group(loadbal_id,
group_id,
allocation=allocation,
port=port,
routing_type=routing_type,
routing_method=routing_method)
env.fout('Load balancer service group %s is being updated!' % identifier) | [
"def",
"cli",
"(",
"env",
",",
"identifier",
",",
"allocation",
",",
"port",
",",
"routing_type",
",",
"routing_method",
")",
":",
"mgr",
"=",
"SoftLayer",
".",
"LoadBalancerManager",
"(",
"env",
".",
"client",
")",
"loadbal_id",
",",
"group_id",
"=",
"loadbal",
".",
"parse_id",
"(",
"identifier",
")",
"# check if any input is provided",
"if",
"not",
"any",
"(",
"[",
"allocation",
",",
"port",
",",
"routing_type",
",",
"routing_method",
"]",
")",
":",
"raise",
"exceptions",
".",
"CLIAbort",
"(",
"'At least one property is required to be changed!'",
")",
"mgr",
".",
"edit_service_group",
"(",
"loadbal_id",
",",
"group_id",
",",
"allocation",
"=",
"allocation",
",",
"port",
"=",
"port",
",",
"routing_type",
"=",
"routing_type",
",",
"routing_method",
"=",
"routing_method",
")",
"env",
".",
"fout",
"(",
"'Load balancer service group %s is being updated!'",
"%",
"identifier",
")"
]
| Edit an existing load balancer service group. | [
"Edit",
"an",
"existing",
"load",
"balancer",
"service",
"group",
"."
]
| python | train | 41 |
sentinel-hub/sentinelhub-py | sentinelhub/constants.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/constants.py#L424-L445 | def canonical_extension(fmt_ext):
""" Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str
"""
if MimeType.has_value(fmt_ext):
return fmt_ext
try:
return {
'tif': MimeType.TIFF.value,
'jpeg': MimeType.JPG.value,
'hdf5': MimeType.HDF.value,
'h5': MimeType.HDF.value
}[fmt_ext]
except KeyError:
raise ValueError('Data format .{} is not supported'.format(fmt_ext)) | [
"def",
"canonical_extension",
"(",
"fmt_ext",
")",
":",
"if",
"MimeType",
".",
"has_value",
"(",
"fmt_ext",
")",
":",
"return",
"fmt_ext",
"try",
":",
"return",
"{",
"'tif'",
":",
"MimeType",
".",
"TIFF",
".",
"value",
",",
"'jpeg'",
":",
"MimeType",
".",
"JPG",
".",
"value",
",",
"'hdf5'",
":",
"MimeType",
".",
"HDF",
".",
"value",
",",
"'h5'",
":",
"MimeType",
".",
"HDF",
".",
"value",
"}",
"[",
"fmt_ext",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'Data format .{} is not supported'",
".",
"format",
"(",
"fmt_ext",
")",
")"
]
| Canonical extension of file format extension
Converts the format extension fmt_ext into the canonical extension for that format. For example,
``canonical_extension('tif') == 'tiff'``. Here we agree that the canonical extension for format F is F.value
:param fmt_ext: A string representing an extension (e.g. ``'txt'``, ``'png'``, etc.)
:type fmt_ext: str
:return: The canonical form of the extension (e.g. if ``fmt_ext='tif'`` then we return ``'tiff'``)
:rtype: str | [
"Canonical",
"extension",
"of",
"file",
"format",
"extension"
]
| python | train | 43.409091 |
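
A few illustrative calls, restricted to behaviour the docstring and alias table above already describe (an unsupported extension is used to show the error path):

from sentinelhub.constants import canonical_extension

assert canonical_extension('tif') == 'tiff'    # alias mapped to MimeType.TIFF.value
assert canonical_extension('tiff') == 'tiff'   # already canonical, returned unchanged
try:
    canonical_extension('docx')
except ValueError:
    pass  # formats outside MimeType and the alias table are rejected
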
aaugustin/websockets | src/websockets/server.py | https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/server.py#L269-L303 | def process_request(
self, path: str, request_headers: Headers
) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]:
"""
Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function.
"""
if self._process_request is not None:
return self._process_request(path, request_headers)
return None | [
"def",
"process_request",
"(",
"self",
",",
"path",
":",
"str",
",",
"request_headers",
":",
"Headers",
")",
"->",
"Union",
"[",
"Optional",
"[",
"HTTPResponse",
"]",
",",
"Awaitable",
"[",
"Optional",
"[",
"HTTPResponse",
"]",
"]",
"]",
":",
"if",
"self",
".",
"_process_request",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_process_request",
"(",
"path",
",",
"request_headers",
")",
"return",
"None"
]
| Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function. | [
"Intercept",
"the",
"HTTP",
"request",
"and",
"return",
"an",
"HTTP",
"response",
"if",
"needed",
"."
]
| python | train | 42.057143 |
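
One hedged way to use the hook above is to subclass the protocol and short-circuit unauthenticated requests; the header name is purely illustrative, and the subclass would be handed to websockets.serve() through its create_protocol argument:

import http
from websockets.server import WebSocketServerProtocol

class AuthProtocol(WebSocketServerProtocol):
    async def process_request(self, path, request_headers):
        if 'X-Auth-Token' not in request_headers:
            # Abort the handshake with a plain HTTP response.
            return http.HTTPStatus.UNAUTHORIZED, [], b'Missing token\n'
        return None  # fall through to the normal WebSocket handshake
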
square/connect-python-sdk | squareconnect/models/charge_request.py | https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/charge_request.py#L382-L396 | def order_id(self, order_id):
"""
Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str
"""
if order_id is None:
raise ValueError("Invalid value for `order_id`, must not be `None`")
if len(order_id) > 192:
raise ValueError("Invalid value for `order_id`, length must be less than `192`")
self._order_id = order_id | [
"def",
"order_id",
"(",
"self",
",",
"order_id",
")",
":",
"if",
"order_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `order_id`, must not be `None`\"",
")",
"if",
"len",
"(",
"order_id",
")",
">",
"192",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `order_id`, length must be less than `192`\"",
")",
"self",
".",
"_order_id",
"=",
"order_id"
]
| Sets the order_id of this ChargeRequest.
The ID of the order to associate with this transaction. If you provide this value, the `amount_money` value of your request must __exactly match__ the value of the order's `total_money` field.
:param order_id: The order_id of this ChargeRequest.
:type: str | [
"Sets",
"the",
"order_id",
"of",
"this",
"ChargeRequest",
".",
"The",
"ID",
"of",
"the",
"order",
"to",
"associate",
"with",
"this",
"transaction",
".",
"If",
"you",
"provide",
"this",
"value",
"the",
"amount_money",
"value",
"of",
"your",
"request",
"must",
"__exactly",
"match__",
"the",
"value",
"of",
"the",
"order",
"s",
"total_money",
"field",
"."
]
| python | train | 42.8 |
atztogo/phonopy | phonopy/api_phonopy.py | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/api_phonopy.py#L1519-L1538 | def get_total_DOS(self):
"""Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double'
"""
warnings.warn("Phonopy.get_total_DOS is deprecated. "
"Use Phonopy.get_total_dos_dict.", DeprecationWarning)
dos = self.get_total_dos_dict()
return dos['frequency_points'], dos['total_dos'] | [
"def",
"get_total_DOS",
"(",
"self",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Phonopy.get_total_DOS is deprecated. \"",
"\"Use Phonopy.get_total_dos_dict.\"",
",",
"DeprecationWarning",
")",
"dos",
"=",
"self",
".",
"get_total_dos_dict",
"(",
")",
"return",
"dos",
"[",
"'frequency_points'",
"]",
",",
"dos",
"[",
"'total_dos'",
"]"
]
| Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double' | [
"Return",
"frequency",
"points",
"and",
"total",
"DOS",
"as",
"a",
"tuple",
"."
]
| python | train | 29.25 |
automl/HpBandSter | hpbandster/core/worker.py | https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/core/worker.py#L70-L95 | def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
"""
loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts
"""
fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl'%self.run_id)
for i in range(num_tries):
try:
with open(fn, 'rb') as fh:
self.nameserver, self.nameserver_port = pickle.load(fh)
return
except FileNotFoundError:
self.logger.warning('config file %s not found (trail %i/%i)'%(fn, i+1, num_tries))
time.sleep(interval)
except:
raise
raise RuntimeError("Could not find the nameserver information, aborting!") | [
"def",
"load_nameserver_credentials",
"(",
"self",
",",
"working_directory",
",",
"num_tries",
"=",
"60",
",",
"interval",
"=",
"1",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"working_directory",
",",
"'HPB_run_%s_pyro.pkl'",
"%",
"self",
".",
"run_id",
")",
"for",
"i",
"in",
"range",
"(",
"num_tries",
")",
":",
"try",
":",
"with",
"open",
"(",
"fn",
",",
"'rb'",
")",
"as",
"fh",
":",
"self",
".",
"nameserver",
",",
"self",
".",
"nameserver_port",
"=",
"pickle",
".",
"load",
"(",
"fh",
")",
"return",
"except",
"FileNotFoundError",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'config file %s not found (trail %i/%i)'",
"%",
"(",
"fn",
",",
"i",
"+",
"1",
",",
"num_tries",
")",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"except",
":",
"raise",
"raise",
"RuntimeError",
"(",
"\"Could not find the nameserver information, aborting!\"",
")"
]
| loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts | [
"loads",
"the",
"nameserver",
"credentials",
"in",
"cases",
"where",
"master",
"and",
"workers",
"share",
"a",
"filesystem"
]
| python | train | 32.769231 |
alex-kostirin/pyatomac | atomac/AXClasses.py | https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L219-L227 | def _postQueuedEvents(self, interval=0.01):
"""Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call).
"""
while len(self.eventList) > 0:
(nextEvent, args) = self.eventList.popleft()
nextEvent(*args)
time.sleep(interval) | [
"def",
"_postQueuedEvents",
"(",
"self",
",",
"interval",
"=",
"0.01",
")",
":",
"while",
"len",
"(",
"self",
".",
"eventList",
")",
">",
"0",
":",
"(",
"nextEvent",
",",
"args",
")",
"=",
"self",
".",
"eventList",
".",
"popleft",
"(",
")",
"nextEvent",
"(",
"*",
"args",
")",
"time",
".",
"sleep",
"(",
"interval",
")"
]
| Private method to post queued events (e.g. Quartz events).
Each event in queue is a tuple (event call, args to event call). | [
"Private",
"method",
"to",
"post",
"queued",
"events",
"(",
"e",
".",
"g",
".",
"Quartz",
"events",
")",
"."
]
| python | valid | 38.777778 |
aio-libs/aiobotocore | aiobotocore/client.py | https://github.com/aio-libs/aiobotocore/blob/d0c0a8651a3738b6260efe962218a5738694dd2a/aiobotocore/client.py#L124-L176 | def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
# substitute iterator with async one
Paginator.PAGE_ITERATOR_CLS = AioPageIterator
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return Paginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (Paginator,), {'paginate': paginate})
operation_model = self._service_model.\
operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator | [
"def",
"get_paginator",
"(",
"self",
",",
"operation_name",
")",
":",
"if",
"not",
"self",
".",
"can_paginate",
"(",
"operation_name",
")",
":",
"raise",
"OperationNotPageableError",
"(",
"operation_name",
"=",
"operation_name",
")",
"else",
":",
"# substitute iterator with async one",
"Paginator",
".",
"PAGE_ITERATOR_CLS",
"=",
"AioPageIterator",
"actual_operation_name",
"=",
"self",
".",
"_PY_TO_OP_NAME",
"[",
"operation_name",
"]",
"# Create a new paginate method that will serve as a proxy to",
"# the underlying Paginator.paginate method. This is needed to",
"# attach a docstring to the method.",
"def",
"paginate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Paginator",
".",
"paginate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"paginator_config",
"=",
"self",
".",
"_cache",
"[",
"'page_config'",
"]",
"[",
"actual_operation_name",
"]",
"# Rename the paginator class based on the type of paginator.",
"paginator_class_name",
"=",
"str",
"(",
"'%s.Paginator.%s'",
"%",
"(",
"get_service_module_name",
"(",
"self",
".",
"meta",
".",
"service_model",
")",
",",
"actual_operation_name",
")",
")",
"# Create the new paginator class",
"documented_paginator_cls",
"=",
"type",
"(",
"paginator_class_name",
",",
"(",
"Paginator",
",",
")",
",",
"{",
"'paginate'",
":",
"paginate",
"}",
")",
"operation_model",
"=",
"self",
".",
"_service_model",
".",
"operation_model",
"(",
"actual_operation_name",
")",
"paginator",
"=",
"documented_paginator_cls",
"(",
"getattr",
"(",
"self",
",",
"operation_name",
")",
",",
"paginator_config",
",",
"operation_model",
")",
"return",
"paginator"
]
| Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object. | [
"Create",
"a",
"paginator",
"for",
"an",
"operation",
"."
]
| python | train | 41.45283 |
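
A hedged usage sketch for the async paginator; the bucket name and region are placeholders, credentials handling and client/session cleanup are omitted for brevity:

import asyncio
import aiobotocore

async def list_keys():
    session = aiobotocore.get_session()
    client = session.create_client('s3', region_name='us-east-1')
    paginator = client.get_paginator('list_objects')
    # The page iterator is asynchronous, hence ``async for`` instead of ``for``.
    async for page in paginator.paginate(Bucket='example-bucket'):
        for entry in page.get('Contents', []):
            print(entry['Key'])

asyncio.get_event_loop().run_until_complete(list_keys())
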
botstory/botstory | botstory/ast/story_context/reducers.py | https://github.com/botstory/botstory/blob/9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3/botstory/ast/story_context/reducers.py#L88-L114 | def iterate_storyline(ctx):
"""
iterate the last storyline from the last visited story part
:param ctx:
:return:
"""
logger.debug('# start iterate')
compiled_story = ctx.compiled_story()
if not compiled_story:
return
for step in range(ctx.current_step(),
len(compiled_story.story_line)):
ctx = ctx.clone()
tail = ctx.stack_tail()
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': tail['data'],
'step': step,
'topic': tail['topic'],
}])
logger.debug('# [{}] iterate'.format(step))
logger.debug(ctx)
ctx = yield ctx | [
"def",
"iterate_storyline",
"(",
"ctx",
")",
":",
"logger",
".",
"debug",
"(",
"'# start iterate'",
")",
"compiled_story",
"=",
"ctx",
".",
"compiled_story",
"(",
")",
"if",
"not",
"compiled_story",
":",
"return",
"for",
"step",
"in",
"range",
"(",
"ctx",
".",
"current_step",
"(",
")",
",",
"len",
"(",
"compiled_story",
".",
"story_line",
")",
")",
":",
"ctx",
"=",
"ctx",
".",
"clone",
"(",
")",
"tail",
"=",
"ctx",
".",
"stack_tail",
"(",
")",
"ctx",
".",
"message",
"=",
"modify_stack_in_message",
"(",
"ctx",
".",
"message",
",",
"lambda",
"stack",
":",
"stack",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"{",
"'data'",
":",
"tail",
"[",
"'data'",
"]",
",",
"'step'",
":",
"step",
",",
"'topic'",
":",
"tail",
"[",
"'topic'",
"]",
",",
"}",
"]",
")",
"logger",
".",
"debug",
"(",
"'# [{}] iterate'",
".",
"format",
"(",
"step",
")",
")",
"logger",
".",
"debug",
"(",
"ctx",
")",
"ctx",
"=",
"yield",
"ctx"
]
| iterate the last storyline from the last visited story part
:param ctx:
:return: | [
"iterate",
"the",
"last",
"storyline",
"from",
"the",
"last",
"visited",
"story",
"part"
]
| python | train | 32.814815 |
mdickinson/bigfloat | bigfloat/core.py | https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2432-L2444 | def frac(x, context=None):
"""
Return the fractional part of ``x``.
The result has the same sign as ``x``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_frac,
(BigFloat._implicit_convert(x),),
context,
) | [
"def",
"frac",
"(",
"x",
",",
"context",
"=",
"None",
")",
":",
"return",
"_apply_function_in_current_context",
"(",
"BigFloat",
",",
"mpfr",
".",
"mpfr_frac",
",",
"(",
"BigFloat",
".",
"_implicit_convert",
"(",
"x",
")",
",",
")",
",",
"context",
",",
")"
]
| Return the fractional part of ``x``.
The result has the same sign as ``x``. | [
"Return",
"the",
"fractional",
"part",
"of",
"x",
"."
]
| python | train | 20.769231 |
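
A small behavioural illustration; the chosen values are exactly representable in binary floating point, so the comparisons are exact:

from bigfloat import BigFloat, frac

assert frac(3.75) == BigFloat(0.75)     # fractional part of a positive value
assert frac(-3.75) == BigFloat(-0.75)   # the result keeps the sign of x
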
phoebe-project/phoebe2 | phoebe/dependencies/autofig/call.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/dependencies/autofig/call.py#L1783-L1795 | def error(self, error):
"""
set the error
"""
# TODO: check length with value?
# TODO: type checks (similar to value)
if self.direction not in ['x', 'y', 'z'] and error is not None:
raise ValueError("error only accepted for x, y, z dimensions")
if isinstance(error, u.Quantity):
error = error.to(self.unit).value
self._error = error | [
"def",
"error",
"(",
"self",
",",
"error",
")",
":",
"# TODO: check length with value?",
"# TODO: type checks (similar to value)",
"if",
"self",
".",
"direction",
"not",
"in",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"and",
"error",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"error only accepted for x, y, z dimensions\"",
")",
"if",
"isinstance",
"(",
"error",
",",
"u",
".",
"Quantity",
")",
":",
"error",
"=",
"error",
".",
"to",
"(",
"self",
".",
"unit",
")",
".",
"value",
"self",
".",
"_error",
"=",
"error"
]
| set the error | [
"set",
"the",
"error"
]
| python | train | 31.538462 |
pyGrowler/Growler | growler/core/application.py | https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/core/application.py#L685-L712 | def create_server_and_run_forever(self, loop=None, **server_config):
"""
Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function.
"""
if loop is None:
import asyncio
loop = asyncio.get_event_loop()
self.create_server(loop=loop, **server_config)
try:
loop.run_forever()
except KeyboardInterrupt:
pass | [
"def",
"create_server_and_run_forever",
"(",
"self",
",",
"loop",
"=",
"None",
",",
"*",
"*",
"server_config",
")",
":",
"if",
"loop",
"is",
"None",
":",
"import",
"asyncio",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"self",
".",
"create_server",
"(",
"loop",
"=",
"loop",
",",
"*",
"*",
"server_config",
")",
"try",
":",
"loop",
".",
"run_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"pass"
]
| Helper function which constructs an HTTP server and listens the
loop forever.
This function exists only to remove boilerplate code for starting
up a growler app.
Args:
**server_config: These keyword arguments are forwarded
directly to the BaseEventLoop.create_server function.
Consult their documentation for details.
Parameters:
loop (asyncio.BaseEventLoop): Optional parameter for specifying
an event loop which will handle socket setup.
**server_config: These keyword arguments are forwarded directly to
the create_server function. | [
"Helper",
"function",
"which",
"constructs",
"an",
"HTTP",
"server",
"and",
"listens",
"the",
"loop",
"forever",
"."
]
| python | train | 35.642857 |
PythonCharmers/python-future | src/future/backports/datetime.py | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/datetime.py#L941-L966 | def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst | [
"def",
"fromutc",
"(",
"self",
",",
"dt",
")",
":",
"if",
"not",
"isinstance",
"(",
"dt",
",",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"\"fromutc() requires a datetime argument\"",
")",
"if",
"dt",
".",
"tzinfo",
"is",
"not",
"self",
":",
"raise",
"ValueError",
"(",
"\"dt.tzinfo is not self\"",
")",
"dtoff",
"=",
"dt",
".",
"utcoffset",
"(",
")",
"if",
"dtoff",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"fromutc() requires a non-None utcoffset() \"",
"\"result\"",
")",
"# See the long comment block at the end of this file for an",
"# explanation of this algorithm.",
"dtdst",
"=",
"dt",
".",
"dst",
"(",
")",
"if",
"dtdst",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"fromutc() requires a non-None dst() result\"",
")",
"delta",
"=",
"dtoff",
"-",
"dtdst",
"if",
"delta",
":",
"dt",
"+=",
"delta",
"dtdst",
"=",
"dt",
".",
"dst",
"(",
")",
"if",
"dtdst",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"fromutc(): dt.dst gave inconsistent \"",
"\"results; cannot convert\"",
")",
"return",
"dt",
"+",
"dtdst"
]
| datetime in UTC -> datetime in local time. | [
"datetime",
"in",
"UTC",
"-",
">",
"datetime",
"in",
"local",
"time",
"."
]
| python | train | 36.384615 |
yahoo/TensorFlowOnSpark | examples/imagenet/inception/slim/losses.py | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/slim/losses.py#L56-L72 | def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer | [
"def",
"l2_regularizer",
"(",
"weight",
"=",
"1.0",
",",
"scope",
"=",
"None",
")",
":",
"def",
"regularizer",
"(",
"tensor",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"scope",
",",
"'L2Regularizer'",
",",
"[",
"tensor",
"]",
")",
":",
"l2_weight",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"weight",
",",
"dtype",
"=",
"tensor",
".",
"dtype",
".",
"base_dtype",
",",
"name",
"=",
"'weight'",
")",
"return",
"tf",
".",
"multiply",
"(",
"l2_weight",
",",
"tf",
".",
"nn",
".",
"l2_loss",
"(",
"tensor",
")",
",",
"name",
"=",
"'value'",
")",
"return",
"regularizer"
]
| Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. | [
"Define",
"a",
"L2",
"regularizer",
"."
]
| python | train | 32.176471 |
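
A hedged sketch of how the factory above is typically wired into a loss. It assumes the TF 1.x style API that the snippet targets, with l2_regularizer taken from the record above and an arbitrary weight value:

import tensorflow as tf

weights = tf.Variable(tf.ones([3, 3]), name='weights')
reg_fn = l2_regularizer(weight=0.0005)   # factory defined in the record above
reg_loss = reg_fn(weights)               # equals 0.0005 * tf.nn.l2_loss(weights)
data_loss = tf.constant(1.0)             # stand-in for the real task loss
total_loss = tf.add(data_loss, reg_loss, name='total_loss')
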
sdispater/eloquent | eloquent/schema/grammars/grammar.py | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/schema/grammars/grammar.py#L107-L123 | def _get_columns(self, blueprint):
"""
Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list
"""
columns = []
for column in blueprint.get_added_columns():
sql = self.wrap(column) + ' ' + self._get_type(column)
columns.append(self._add_modifiers(sql, blueprint, column))
return columns | [
"def",
"_get_columns",
"(",
"self",
",",
"blueprint",
")",
":",
"columns",
"=",
"[",
"]",
"for",
"column",
"in",
"blueprint",
".",
"get_added_columns",
"(",
")",
":",
"sql",
"=",
"self",
".",
"wrap",
"(",
"column",
")",
"+",
"' '",
"+",
"self",
".",
"_get_type",
"(",
"column",
")",
"columns",
".",
"append",
"(",
"self",
".",
"_add_modifiers",
"(",
"sql",
",",
"blueprint",
",",
"column",
")",
")",
"return",
"columns"
]
| Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list | [
"Get",
"the",
"blueprint",
"s",
"columns",
"definitions",
"."
]
| python | train | 25.176471 |
cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/lookup_plugins/sequence.py | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/lookup_plugins/sequence.py#L80-L86 | def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d" | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"start",
"=",
"1",
"self",
".",
"count",
"=",
"None",
"self",
".",
"end",
"=",
"None",
"self",
".",
"stride",
"=",
"1",
"self",
".",
"format",
"=",
"\"%d\""
]
| set sensible defaults | [
"set",
"sensible",
"defaults"
]
| python | train | 24.285714 |
gwastro/pycbc | pycbc/inference/sampler/__init__.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/__init__.py#L33-L55 | def load_from_config(cp, model, **kwargs):
"""Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
"""
name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs) | [
"def",
"load_from_config",
"(",
"cp",
",",
"model",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"cp",
".",
"get",
"(",
"'sampler'",
",",
"'name'",
")",
"return",
"samplers",
"[",
"name",
"]",
".",
"from_config",
"(",
"cp",
",",
"model",
",",
"*",
"*",
"kwargs",
")"
]
| Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler. | [
"Loads",
"a",
"sampler",
"from",
"the",
"given",
"config",
"file",
"."
]
| python | train | 30 |
googleapis/google-auth-library-python | google/auth/_helpers.py | https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/auth/_helpers.py#L82-L105 | def to_bytes(value, encoding='utf-8'):
"""Converts a string value to bytes, if necessary.
    Unfortunately, ``six.b`` is insufficient for this task in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes.
"""
result = (value.encode(encoding)
if isinstance(value, six.text_type) else value)
if isinstance(result, six.binary_type):
return result
else:
raise ValueError('{0!r} could not be converted to bytes'.format(value)) | [
"def",
"to_bytes",
"(",
"value",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"result",
"=",
"(",
"value",
".",
"encode",
"(",
"encoding",
")",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"text_type",
")",
"else",
"value",
")",
"if",
"isinstance",
"(",
"result",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"result",
"else",
":",
"raise",
"ValueError",
"(",
"'{0!r} could not be converted to bytes'",
".",
"format",
"(",
"value",
")",
")"
]
| Converts a string value to bytes, if necessary.
    Unfortunately, ``six.b`` is insufficient for this task in
Python 2 because it does not modify ``unicode`` objects.
Args:
value (Union[str, bytes]): The value to be converted.
encoding (str): The encoding to use to convert unicode to bytes.
Defaults to "utf-8".
Returns:
bytes: The original value converted to bytes (if unicode) or as
passed in if it started out as bytes.
Raises:
ValueError: If the value could not be converted to bytes. | [
"Converts",
"a",
"string",
"value",
"to",
"bytes",
"if",
"necessary",
"."
]
| python | train | 35.708333 |
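
A behaviour sketch for the helper above; the import path follows the record's module and the sample values are arbitrary:

from google.auth._helpers import to_bytes

assert to_bytes(u'résumé') == u'résumé'.encode('utf-8')  # text is encoded
assert to_bytes(b'raw') == b'raw'                        # bytes pass through untouched
try:
    to_bytes(1234)
except ValueError:
    pass  # anything that is neither str nor bytes is rejected, as documented
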
log2timeline/dfvfs | dfvfs/vfs/fake_file_system.py | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/fake_file_system.py#L128-L147 | def GetFileEntryByPath(self, path):
"""Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
"""
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) | [
"def",
"GetFileEntryByPath",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"is",
"None",
":",
"return",
"None",
"file_entry_type",
",",
"_",
"=",
"self",
".",
"_paths",
".",
"get",
"(",
"path",
",",
"(",
"None",
",",
"None",
")",
")",
"if",
"not",
"file_entry_type",
":",
"return",
"None",
"path_spec",
"=",
"fake_path_spec",
".",
"FakePathSpec",
"(",
"location",
"=",
"path",
")",
"return",
"fake_file_entry",
".",
"FakeFileEntry",
"(",
"self",
".",
"_resolver_context",
",",
"self",
",",
"path_spec",
",",
"file_entry_type",
"=",
"file_entry_type",
")"
]
| Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available. | [
"Retrieves",
"a",
"file",
"entry",
"for",
"a",
"path",
"."
]
| python | train | 26.6 |
common-workflow-language/cwltool | cwltool/main.py | https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/main.py#L892-L900 | def find_default_container(builder, # type: HasReqsHints
default_container=None, # type: Text
use_biocontainers=None, # type: bool
): # type: (...) -> Optional[Text]
"""Default finder for default containers."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder)
return default_container | [
"def",
"find_default_container",
"(",
"builder",
",",
"# type: HasReqsHints",
"default_container",
"=",
"None",
",",
"# type: Text",
"use_biocontainers",
"=",
"None",
",",
"# type: bool",
")",
":",
"# type: (...) -> Optional[Text]",
"if",
"not",
"default_container",
"and",
"use_biocontainers",
":",
"default_container",
"=",
"get_container_from_software_requirements",
"(",
"use_biocontainers",
",",
"builder",
")",
"return",
"default_container"
]
| Default finder for default containers. | [
"Default",
"finder",
"for",
"default",
"containers",
"."
]
| python | train | 55.333333 |
DarkEnergySurvey/ugali | ugali/isochrone/model.py | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/isochrone/model.py#L470-L503 | def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage?
"""
mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
mag = mag_1_array if self.band_1_detection else mag_2_array
color = mag_1_array - mag_2_array
# ADW: Only calculate observable fraction over interior pixels...
pixels = mask.roi.pixels_interior
mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
# ADW: Restrict mag and color to range of mask with sufficient solid angle
cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
mask.roi.bins_color, mask.roi.bins_mag) > 0
# Pre-apply these cuts to the 1D mass_pdf_array to save time
mass_pdf_cut = mass_pdf_array*cmd_cut
# Create 2D arrays of cuts for each pixel
mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
mask_cut_repeat = mask_1_cut & mask_2_cut
observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
return observable_fraction | [
"def",
"observableFractionCMDX",
"(",
"self",
",",
"mask",
",",
"distance_modulus",
",",
"mass_min",
"=",
"0.1",
")",
":",
"mass_init_array",
",",
"mass_pdf_array",
",",
"mass_act_array",
",",
"mag_1_array",
",",
"mag_2_array",
"=",
"self",
".",
"sample",
"(",
"mass_min",
"=",
"mass_min",
",",
"full_data_range",
"=",
"False",
")",
"mag",
"=",
"mag_1_array",
"if",
"self",
".",
"band_1_detection",
"else",
"mag_2_array",
"color",
"=",
"mag_1_array",
"-",
"mag_2_array",
"# ADW: Only calculate observable fraction over interior pixels...",
"pixels",
"=",
"mask",
".",
"roi",
".",
"pixels_interior",
"mag_1_mask",
"=",
"mask",
".",
"mask_1",
".",
"mask_roi_sparse",
"[",
"mask",
".",
"roi",
".",
"pixel_interior_cut",
"]",
"mag_2_mask",
"=",
"mask",
".",
"mask_2",
".",
"mask_roi_sparse",
"[",
"mask",
".",
"roi",
".",
"pixel_interior_cut",
"]",
"# ADW: Restrict mag and color to range of mask with sufficient solid angle",
"cmd_cut",
"=",
"ugali",
".",
"utils",
".",
"binning",
".",
"take2D",
"(",
"mask",
".",
"solid_angle_cmd",
",",
"color",
",",
"mag",
"+",
"distance_modulus",
",",
"mask",
".",
"roi",
".",
"bins_color",
",",
"mask",
".",
"roi",
".",
"bins_mag",
")",
">",
"0",
"# Pre-apply these cuts to the 1D mass_pdf_array to save time",
"mass_pdf_cut",
"=",
"mass_pdf_array",
"*",
"cmd_cut",
"# Create 2D arrays of cuts for each pixel",
"mask_1_cut",
"=",
"(",
"mag_1_array",
"+",
"distance_modulus",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"<",
"mag_1_mask",
"mask_2_cut",
"=",
"(",
"mag_2_array",
"+",
"distance_modulus",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"<",
"mag_2_mask",
"mask_cut_repeat",
"=",
"mask_1_cut",
"&",
"mask_2_cut",
"observable_fraction",
"=",
"(",
"mass_pdf_cut",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"mask_cut_repeat",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"return",
"observable_fraction"
]
| Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage? | [
"Compute",
"observable",
"fraction",
"of",
"stars",
"with",
"masses",
"greater",
"than",
"mass_min",
"in",
"each",
"pixel",
"in",
"the",
"interior",
"region",
"of",
"the",
"mask",
"."
]
| python | train | 54.764706 |
emory-libraries/eulfedora | eulfedora/server.py | https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/server.py#L193-L197 | def risearch(self):
"instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.fedora_root, self.username, self.password)
return self._risearch | [
"def",
"risearch",
"(",
"self",
")",
":",
"if",
"self",
".",
"_risearch",
"is",
"None",
":",
"self",
".",
"_risearch",
"=",
"ResourceIndex",
"(",
"self",
".",
"fedora_root",
",",
"self",
".",
"username",
",",
"self",
".",
"password",
")",
"return",
"self",
".",
"_risearch"
]
| instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials | [
"instance",
"of",
":",
"class",
":",
"eulfedora",
".",
"api",
".",
"ResourceIndex",
"with",
"the",
"same",
"root",
"url",
"and",
"credentials"
]
| python | train | 54 |
SKA-ScienceDataProcessor/integration-prototype | sip/platform/logging/sip_logging/sip_logging.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/platform/logging/sip_logging/sip_logging.py#L119-L123 | def set_log_level(logger_name: str, log_level: str, propagate: bool = False):
"""Set the log level of the specified logger."""
log = logging.getLogger(logger_name)
log.propagate = propagate
log.setLevel(log_level) | [
"def",
"set_log_level",
"(",
"logger_name",
":",
"str",
",",
"log_level",
":",
"str",
",",
"propagate",
":",
"bool",
"=",
"False",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"logger_name",
")",
"log",
".",
"propagate",
"=",
"propagate",
"log",
".",
"setLevel",
"(",
"log_level",
")"
]
| Set the log level of the specified logger. | [
"Set",
"the",
"log",
"level",
"of",
"the",
"specified",
"logger",
"."
]
| python | train | 45 |
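
A minimal illustration; the logger name and level are arbitrary, the import path is an assumption based on the record's package layout, and basicConfig() is only there so the propagated record has a handler to reach:

import logging
from sip_logging.sip_logging import set_log_level

logging.basicConfig()  # root handler so propagated records are actually emitted
set_log_level('sip.examples.demo', 'DEBUG', propagate=True)
logging.getLogger('sip.examples.demo').debug('now emitted at DEBUG level')
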
bukun/TorCMS | torcms/handlers/post_handler.py | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_handler.py#L345-L355 | def _gen_last_current_relation(self, post_id):
'''
Generate the relation for the post and last post viewed.
'''
last_post_id = self.get_secure_cookie('last_post_uid')
if last_post_id:
last_post_id = last_post_id.decode('utf-8')
self.set_secure_cookie('last_post_uid', post_id)
if last_post_id and MPost.get_by_uid(last_post_id):
self._add_relation(last_post_id, post_id) | [
"def",
"_gen_last_current_relation",
"(",
"self",
",",
"post_id",
")",
":",
"last_post_id",
"=",
"self",
".",
"get_secure_cookie",
"(",
"'last_post_uid'",
")",
"if",
"last_post_id",
":",
"last_post_id",
"=",
"last_post_id",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"set_secure_cookie",
"(",
"'last_post_uid'",
",",
"post_id",
")",
"if",
"last_post_id",
"and",
"MPost",
".",
"get_by_uid",
"(",
"last_post_id",
")",
":",
"self",
".",
"_add_relation",
"(",
"last_post_id",
",",
"post_id",
")"
]
| Generate the relation for the post and last post viewed. | [
"Generate",
"the",
"relation",
"for",
"the",
"post",
"and",
"last",
"post",
"viewed",
"."
]
| python | train | 40.090909 |
Esri/ArcREST | src/arcrest/ags/mapservice.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/mapservice.py#L445-L477 | def allLayers(self):
""" returns all layers for the service """
url = self._url + "/layers"
params = {
"f" : "json"
}
res = self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return_dict = {
"layers" : [],
"tables" : []
}
for k, v in res.items():
if k == "layers":
for val in v:
return_dict['layers'].append(
FeatureLayer(url=self._url + "/%s" % val['id'],
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
elif k == "tables":
for val in v:
return_dict['tables'].append(
TableLayer(url=self._url + "/%s" % val['id'],
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
del k,v
return return_dict | [
"def",
"allLayers",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/layers\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"res",
"=",
"self",
".",
"_get",
"(",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
"return_dict",
"=",
"{",
"\"layers\"",
":",
"[",
"]",
",",
"\"tables\"",
":",
"[",
"]",
"}",
"for",
"k",
",",
"v",
"in",
"res",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"\"layers\"",
":",
"for",
"val",
"in",
"v",
":",
"return_dict",
"[",
"'layers'",
"]",
".",
"append",
"(",
"FeatureLayer",
"(",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/%s\"",
"%",
"val",
"[",
"'id'",
"]",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
")",
"elif",
"k",
"==",
"\"tables\"",
":",
"for",
"val",
"in",
"v",
":",
"return_dict",
"[",
"'tables'",
"]",
".",
"append",
"(",
"TableLayer",
"(",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/%s\"",
"%",
"val",
"[",
"'id'",
"]",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
")",
"del",
"k",
",",
"v",
"return",
"return_dict"
]
| returns all layers for the service | [
"returns",
"all",
"layers",
"for",
"the",
"service"
]
| python | train | 40.424242 |
v1k45/python-qBittorrent | qbittorrent/client.py | https://github.com/v1k45/python-qBittorrent/blob/04f9482a022dcc78c56b0b9acb9ca455f855ae24/qbittorrent/client.py#L150-L169 | def torrents(self, **filters):
"""
Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter.
"""
params = {}
for name, value in filters.items():
# make sure that old 'status' argument still works
name = 'filter' if name == 'status' else name
params[name] = value
return self._get('query/torrents', params=params) | [
"def",
"torrents",
"(",
"self",
",",
"*",
"*",
"filters",
")",
":",
"params",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"filters",
".",
"items",
"(",
")",
":",
"# make sure that old 'status' argument still works",
"name",
"=",
"'filter'",
"if",
"name",
"==",
"'status'",
"else",
"name",
"params",
"[",
"name",
"]",
"=",
"value",
"return",
"self",
".",
"_get",
"(",
"'query/torrents'",
",",
"params",
"=",
"params",
")"
]
| Returns a list of torrents matching the supplied filters.
:param filter: Current status of the torrents.
:param category: Fetch all torrents with the supplied label.
:param sort: Sort torrents by.
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:return: list() of torrent with matching filter. | [
"Returns",
"a",
"list",
"of",
"torrents",
"matching",
"the",
"supplied",
"filters",
"."
]
| python | train | 38.9 |
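
A hedged usage sketch; the host, credentials and filter values are placeholders for a locally running qBittorrent Web UI:

from qbittorrent import Client

qb = Client('http://127.0.0.1:8080/')
qb.login('admin', 'adminadmin')
# 'status' is still accepted and mapped to the Web API's 'filter' parameter.
for torrent in qb.torrents(status='downloading', sort='ratio'):
    print(torrent['name'])
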
nicolargo/glances | glances/plugins/glances_processlist.py | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_processlist.py#L148-L166 | def get_nice_alert(self, value):
"""Return the alert relative to the Nice configuration list"""
value = str(value)
try:
if value in self.get_limit('nice_critical'):
return 'CRITICAL'
except KeyError:
pass
try:
if value in self.get_limit('nice_warning'):
return 'WARNING'
except KeyError:
pass
try:
if value in self.get_limit('nice_careful'):
return 'CAREFUL'
except KeyError:
pass
return 'DEFAULT' | [
"def",
"get_nice_alert",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
"try",
":",
"if",
"value",
"in",
"self",
".",
"get_limit",
"(",
"'nice_critical'",
")",
":",
"return",
"'CRITICAL'",
"except",
"KeyError",
":",
"pass",
"try",
":",
"if",
"value",
"in",
"self",
".",
"get_limit",
"(",
"'nice_warning'",
")",
":",
"return",
"'WARNING'",
"except",
"KeyError",
":",
"pass",
"try",
":",
"if",
"value",
"in",
"self",
".",
"get_limit",
"(",
"'nice_careful'",
")",
":",
"return",
"'CAREFUL'",
"except",
"KeyError",
":",
"pass",
"return",
"'DEFAULT'"
]
| Return the alert relative to the Nice configuration list | [
"Return",
"the",
"alert",
"relative",
"to",
"the",
"Nice",
"configuration",
"list"
]
| python | train | 30.052632 |
mcs07/ChemDataExtractor | chemdataextractor/nlp/tag.py | https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L106-L117 | def tag(self, tokens):
"""Return a list of (token, tag) tuples for a given list of tokens."""
tags = []
for token in tokens:
normalized = self.lexicon[token].normalized
for regex, tag in self.regexes:
if regex.match(normalized):
tags.append((token, tag))
break
else:
tags.append((token, None))
return tags | [
"def",
"tag",
"(",
"self",
",",
"tokens",
")",
":",
"tags",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"normalized",
"=",
"self",
".",
"lexicon",
"[",
"token",
"]",
".",
"normalized",
"for",
"regex",
",",
"tag",
"in",
"self",
".",
"regexes",
":",
"if",
"regex",
".",
"match",
"(",
"normalized",
")",
":",
"tags",
".",
"append",
"(",
"(",
"token",
",",
"tag",
")",
")",
"break",
"else",
":",
"tags",
".",
"append",
"(",
"(",
"token",
",",
"None",
")",
")",
"return",
"tags"
]
| Return a list of (token, tag) tuples for a given list of tokens. | [
"Return",
"a",
"list",
"of",
"(",
"token",
"tag",
")",
"tuples",
"for",
"a",
"given",
"list",
"of",
"tokens",
"."
]
| python | train | 36.166667 |
openego/eDisGo | edisgo/flex_opt/check_tech_constraints.py | https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/flex_opt/check_tech_constraints.py#L234-L311 | def _station_load(network, station, crit_stations):
"""
Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`.
"""
if isinstance(station, LVStation):
grid_level = 'lv'
else:
grid_level = 'mv'
# maximum allowed apparent power of station for feed-in and load case
s_station = sum([_.type.S_nom for _ in station.transformers])
s_station_allowed_per_case = {}
s_station_allowed_per_case['feedin_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_feedin_case_transformer'.format(
grid_level)]
s_station_allowed_per_case['load_case'] = s_station * network.config[
'grid_expansion_load_factors']['{}_load_case_transformer'.format(
grid_level)]
# maximum allowed apparent power of station in each time step
s_station_allowed = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: s_station_allowed_per_case[_])
try:
if isinstance(station, LVStation):
s_station_pfa = network.results.s_res(
station.transformers).sum(axis=1)
else:
s_station_pfa = network.results.s_res([station]).iloc[:, 0]
s_res = s_station_allowed - s_station_pfa
s_res = s_res[s_res < 0]
# check if maximum allowed apparent power of station exceeds
# apparent power from power flow analysis at any time step
if not s_res.empty:
# find out largest relative deviation
load_factor = \
network.timeseries.timesteps_load_feedin_case.case.apply(
lambda _: network.config[
'grid_expansion_load_factors'][
'{}_{}_transformer'.format(grid_level, _)])
relative_s_res = load_factor * s_res
crit_stations = crit_stations.append(pd.DataFrame(
{'s_pfa': s_station_pfa.loc[relative_s_res.idxmin()],
'time_index': relative_s_res.idxmin()},
index=[station]))
except KeyError:
logger.debug('No results for {} station to check overloading.'.format(
grid_level.upper()))
return crit_stations | [
"def",
"_station_load",
"(",
"network",
",",
"station",
",",
"crit_stations",
")",
":",
"if",
"isinstance",
"(",
"station",
",",
"LVStation",
")",
":",
"grid_level",
"=",
"'lv'",
"else",
":",
"grid_level",
"=",
"'mv'",
"# maximum allowed apparent power of station for feed-in and load case",
"s_station",
"=",
"sum",
"(",
"[",
"_",
".",
"type",
".",
"S_nom",
"for",
"_",
"in",
"station",
".",
"transformers",
"]",
")",
"s_station_allowed_per_case",
"=",
"{",
"}",
"s_station_allowed_per_case",
"[",
"'feedin_case'",
"]",
"=",
"s_station",
"*",
"network",
".",
"config",
"[",
"'grid_expansion_load_factors'",
"]",
"[",
"'{}_feedin_case_transformer'",
".",
"format",
"(",
"grid_level",
")",
"]",
"s_station_allowed_per_case",
"[",
"'load_case'",
"]",
"=",
"s_station",
"*",
"network",
".",
"config",
"[",
"'grid_expansion_load_factors'",
"]",
"[",
"'{}_load_case_transformer'",
".",
"format",
"(",
"grid_level",
")",
"]",
"# maximum allowed apparent power of station in each time step",
"s_station_allowed",
"=",
"network",
".",
"timeseries",
".",
"timesteps_load_feedin_case",
".",
"case",
".",
"apply",
"(",
"lambda",
"_",
":",
"s_station_allowed_per_case",
"[",
"_",
"]",
")",
"try",
":",
"if",
"isinstance",
"(",
"station",
",",
"LVStation",
")",
":",
"s_station_pfa",
"=",
"network",
".",
"results",
".",
"s_res",
"(",
"station",
".",
"transformers",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"else",
":",
"s_station_pfa",
"=",
"network",
".",
"results",
".",
"s_res",
"(",
"[",
"station",
"]",
")",
".",
"iloc",
"[",
":",
",",
"0",
"]",
"s_res",
"=",
"s_station_allowed",
"-",
"s_station_pfa",
"s_res",
"=",
"s_res",
"[",
"s_res",
"<",
"0",
"]",
"# check if maximum allowed apparent power of station exceeds",
"# apparent power from power flow analysis at any time step",
"if",
"not",
"s_res",
".",
"empty",
":",
"# find out largest relative deviation",
"load_factor",
"=",
"network",
".",
"timeseries",
".",
"timesteps_load_feedin_case",
".",
"case",
".",
"apply",
"(",
"lambda",
"_",
":",
"network",
".",
"config",
"[",
"'grid_expansion_load_factors'",
"]",
"[",
"'{}_{}_transformer'",
".",
"format",
"(",
"grid_level",
",",
"_",
")",
"]",
")",
"relative_s_res",
"=",
"load_factor",
"*",
"s_res",
"crit_stations",
"=",
"crit_stations",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"{",
"'s_pfa'",
":",
"s_station_pfa",
".",
"loc",
"[",
"relative_s_res",
".",
"idxmin",
"(",
")",
"]",
",",
"'time_index'",
":",
"relative_s_res",
".",
"idxmin",
"(",
")",
"}",
",",
"index",
"=",
"[",
"station",
"]",
")",
")",
"except",
"KeyError",
":",
"logger",
".",
"debug",
"(",
"'No results for {} station to check overloading.'",
".",
"format",
"(",
"grid_level",
".",
"upper",
"(",
")",
")",
")",
"return",
"crit_stations"
]
| Checks for over-loading of stations.
Parameters
----------
network : :class:`~.grid.network.Network`
station : :class:`~.grid.components.LVStation` or :class:`~.grid.components.MVStation`
crit_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded stations, their apparent power at
maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations either of type
:class:`~.grid.components.LVStation` or
:class:`~.grid.components.MVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`. | [
"Checks",
"for",
"over",
"-",
"loading",
"of",
"stations",
"."
]
| python | train | 44.564103 |
apache/incubator-heron | heron/tools/tracker/src/python/handlers/metricsqueryhandler.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/metricsqueryhandler.py#L78-L120 | def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):
"""
Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
}
"""
query = Query(self.tracker)
metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)
# Parse the response
ret = {}
ret["starttime"] = start_time
ret["endtime"] = end_time
ret["timeline"] = []
for metric in metrics:
tl = {
"data": metric.timeline
}
if metric.instance:
tl["instance"] = metric.instance
ret["timeline"].append(tl)
raise tornado.gen.Return(ret) | [
"def",
"executeMetricsQuery",
"(",
"self",
",",
"tmaster",
",",
"queryString",
",",
"start_time",
",",
"end_time",
",",
"callback",
"=",
"None",
")",
":",
"query",
"=",
"Query",
"(",
"self",
".",
"tracker",
")",
"metrics",
"=",
"yield",
"query",
".",
"execute_query",
"(",
"tmaster",
",",
"queryString",
",",
"start_time",
",",
"end_time",
")",
"# Parse the response",
"ret",
"=",
"{",
"}",
"ret",
"[",
"\"starttime\"",
"]",
"=",
"start_time",
"ret",
"[",
"\"endtime\"",
"]",
"=",
"end_time",
"ret",
"[",
"\"timeline\"",
"]",
"=",
"[",
"]",
"for",
"metric",
"in",
"metrics",
":",
"tl",
"=",
"{",
"\"data\"",
":",
"metric",
".",
"timeline",
"}",
"if",
"metric",
".",
"instance",
":",
"tl",
"[",
"\"instance\"",
"]",
"=",
"metric",
".",
"instance",
"ret",
"[",
"\"timeline\"",
"]",
".",
"append",
"(",
"tl",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"ret",
")"
]
| Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
} | [
"Get",
"the",
"specified",
"metrics",
"for",
"the",
"given",
"query",
"in",
"this",
"topology",
".",
"Returns",
"the",
"following",
"dict",
"on",
"success",
":",
"{",
"timeline",
":",
"[",
"{",
"instance",
":",
"<instance",
">",
"data",
":",
"{",
"<start_time",
">",
":",
"<numeric",
"value",
">",
"<start_time",
">",
":",
"<numeric",
"value",
">",
"...",
"}",
"}",
"{",
"...",
"}",
"...",
"starttime",
":",
"<numeric",
"value",
">",
"endtime",
":",
"<numeric",
"value",
">",
"}"
]
| python | valid | 23.581395 |
mivade/tornadose | tornadose/handlers.py | https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L69-L74 | def prepare(self):
"""Log access."""
request_time = 1000.0 * self.request.request_time()
access_log.info(
"%d %s %.2fms", self.get_status(),
self._request_summary(), request_time) | [
"def",
"prepare",
"(",
"self",
")",
":",
"request_time",
"=",
"1000.0",
"*",
"self",
".",
"request",
".",
"request_time",
"(",
")",
"access_log",
".",
"info",
"(",
"\"%d %s %.2fms\"",
",",
"self",
".",
"get_status",
"(",
")",
",",
"self",
".",
"_request_summary",
"(",
")",
",",
"request_time",
")"
]
| Log access. | [
"Log",
"access",
"."
]
| python | train | 37.833333 |
boriel/zxbasic | asmparse.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L790-L804 | def p_ind8_I(p):
""" reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP
"""
expr = p[4]
if p[3] == '-':
expr = Expr.makenode(Container('-', p.lineno(3)), expr)
p[0] = ('(%s+N)' % p[2], expr) | [
"def",
"p_ind8_I",
"(",
"p",
")",
":",
"expr",
"=",
"p",
"[",
"4",
"]",
"if",
"p",
"[",
"3",
"]",
"==",
"'-'",
":",
"expr",
"=",
"Expr",
".",
"makenode",
"(",
"Container",
"(",
"'-'",
",",
"p",
".",
"lineno",
"(",
"3",
")",
")",
",",
"expr",
")",
"p",
"[",
"0",
"]",
"=",
"(",
"'(%s+N)'",
"%",
"p",
"[",
"2",
"]",
",",
"expr",
")"
]
| reg8_I : LP IX PLUS expr RP
| LP IX MINUS expr RP
| LP IY PLUS expr RP
| LP IY MINUS expr RP
| LP IX PLUS pexpr RP
| LP IX MINUS pexpr RP
| LP IY PLUS pexpr RP
| LP IY MINUS pexpr RP | [
"reg8_I",
":",
"LP",
"IX",
"PLUS",
"expr",
"RP",
"|",
"LP",
"IX",
"MINUS",
"expr",
"RP",
"|",
"LP",
"IY",
"PLUS",
"expr",
"RP",
"|",
"LP",
"IY",
"MINUS",
"expr",
"RP",
"|",
"LP",
"IX",
"PLUS",
"pexpr",
"RP",
"|",
"LP",
"IX",
"MINUS",
"pexpr",
"RP",
"|",
"LP",
"IY",
"PLUS",
"pexpr",
"RP",
"|",
"LP",
"IY",
"MINUS",
"pexpr",
"RP"
]
| python | train | 29.666667 |
saltstack/salt | salt/cloud/clouds/aliyun.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/aliyun.py#L454-L471 | def get_size(vm_):
'''
Return the VM's size. Used by create_node().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and six.text_type(vm_size) in sizes:
return sizes[vm_size]['InstanceTypeId']
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
) | [
"def",
"get_size",
"(",
"vm_",
")",
":",
"sizes",
"=",
"avail_sizes",
"(",
")",
"vm_size",
"=",
"six",
".",
"text_type",
"(",
"config",
".",
"get_cloud_config_value",
"(",
"'size'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
")",
"if",
"not",
"vm_size",
":",
"raise",
"SaltCloudNotFound",
"(",
"'No size specified for this VM.'",
")",
"if",
"vm_size",
"and",
"six",
".",
"text_type",
"(",
"vm_size",
")",
"in",
"sizes",
":",
"return",
"sizes",
"[",
"vm_size",
"]",
"[",
"'InstanceTypeId'",
"]",
"raise",
"SaltCloudNotFound",
"(",
"'The specified size, \\'{0}\\', could not be found.'",
".",
"format",
"(",
"vm_size",
")",
")"
]
| Return the VM's size. Used by create_node(). | [
"Return",
"the",
"VM",
"s",
"size",
".",
"Used",
"by",
"create_node",
"()",
"."
]
| python | train | 28.222222 |
JasonKessler/scattertext | scattertext/indexstore/IndexStoreFromList.py | https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/indexstore/IndexStoreFromList.py#L6-L20 | def build(values):
'''
Parameters
----------
values: [term, ...]
Returns
-------
IndexStore
'''
idxstore = IndexStore()
idxstore._i2val = list(values)
idxstore._val2i = {term:i for i,term in enumerate(values)}
idxstore._next_i = len(values)
return idxstore | [
"def",
"build",
"(",
"values",
")",
":",
"idxstore",
"=",
"IndexStore",
"(",
")",
"idxstore",
".",
"_i2val",
"=",
"list",
"(",
"values",
")",
"idxstore",
".",
"_val2i",
"=",
"{",
"term",
":",
"i",
"for",
"i",
",",
"term",
"in",
"enumerate",
"(",
"values",
")",
"}",
"idxstore",
".",
"_next_i",
"=",
"len",
"(",
"values",
")",
"return",
"idxstore"
]
| Parameters
----------
values: [term, ...]
Returns
-------
IndexStore | [
"Parameters",
"----------",
"values",
":",
"[",
"term",
"...",
"]"
]
| python | train | 17.933333 |
IntegralDefense/splunklib | splunklib/__init__.py | https://github.com/IntegralDefense/splunklib/blob/c3a02c83daad20cf24838f52b22cd2476f062eed/splunklib/__init__.py#L83-L102 | def query_relative(self, query, event_time=None, relative_duration_before=None, relative_duration_after=None):
"""Perform the query and calculate the time range based on the relative values."""
assert event_time is None or isinstance(event_time, datetime.datetime)
assert relative_duration_before is None or isinstance(relative_duration_before, str)
assert relative_duration_after is None or isinstance(relative_duration_after, str)
if event_time is None:
# use now as the default
event_time = datetime.datetime.now()
# use preconfigured defaults
if relative_duration_before is None:
relative_duration_before = self.relative_duration_before
if relative_duration_after is None:
relative_duration_after = self.relative_duration_after
time_start = event_time - create_timedelta(relative_duration_before)
time_end = event_time + create_timedelta(relative_duration_after)
return self.query_with_time(query, time_start, time_end) | [
"def",
"query_relative",
"(",
"self",
",",
"query",
",",
"event_time",
"=",
"None",
",",
"relative_duration_before",
"=",
"None",
",",
"relative_duration_after",
"=",
"None",
")",
":",
"assert",
"event_time",
"is",
"None",
"or",
"isinstance",
"(",
"event_time",
",",
"datetime",
".",
"datetime",
")",
"assert",
"relative_duration_before",
"is",
"None",
"or",
"isinstance",
"(",
"relative_duration_before",
",",
"str",
")",
"assert",
"relative_duration_after",
"is",
"None",
"or",
"isinstance",
"(",
"relative_duration_after",
",",
"str",
")",
"if",
"event_time",
"is",
"None",
":",
"# use now as the default ",
"event_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# use preconfigured defaults",
"if",
"relative_duration_before",
"is",
"None",
":",
"relative_duration_before",
"=",
"self",
".",
"relative_duration_before",
"if",
"relative_duration_after",
"is",
"None",
":",
"relative_duration_after",
"=",
"self",
".",
"relative_duration_after",
"time_start",
"=",
"event_time",
"-",
"create_timedelta",
"(",
"relative_duration_before",
")",
"time_end",
"=",
"event_time",
"+",
"create_timedelta",
"(",
"relative_duration_after",
")",
"return",
"self",
".",
"query_with_time",
"(",
"query",
",",
"time_start",
",",
"time_end",
")"
]
| Perform the query and calculate the time range based on the relative values. | [
"Perform",
"the",
"query",
"and",
"calculate",
"the",
"time",
"range",
"based",
"on",
"the",
"relative",
"values",
"."
]
| python | train | 52.65 |
liip/taxi | taxi/timesheet/entry.py | https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/entry.py#L365-L375 | def delete_date(self, date):
"""
Remove the date line from the textual representation. This doesn't
remove any entry line.
"""
self.lines = [
line for line in self.lines
if not isinstance(line, DateLine) or line.date != date
]
self.lines = trim(self.lines) | [
"def",
"delete_date",
"(",
"self",
",",
"date",
")",
":",
"self",
".",
"lines",
"=",
"[",
"line",
"for",
"line",
"in",
"self",
".",
"lines",
"if",
"not",
"isinstance",
"(",
"line",
",",
"DateLine",
")",
"or",
"line",
".",
"date",
"!=",
"date",
"]",
"self",
".",
"lines",
"=",
"trim",
"(",
"self",
".",
"lines",
")"
]
| Remove the date line from the textual representation. This doesn't
remove any entry line. | [
"Remove",
"the",
"date",
"line",
"from",
"the",
"textual",
"representation",
".",
"This",
"doesn",
"t",
"remove",
"any",
"entry",
"line",
"."
]
| python | train | 29.727273 |
sanger-pathogens/ariba | ariba/scaffold_graph.py | https://github.com/sanger-pathogens/ariba/blob/16a0b1916ce0e886bd22550ba2d648542977001b/ariba/scaffold_graph.py#L13-L32 | def update_from_sam(self, sam, sam_reader):
'''Updates graph info from a pysam.AlignedSegment object'''
if sam.is_unmapped \
or sam.mate_is_unmapped \
or (sam.reference_id == sam.next_reference_id):
return
new_link = link.Link(sam, sam_reader, self.ref_lengths)
read_name = sam.query_name
if read_name in self.partial_links:
new_link.merge(self.partial_links[read_name])
del self.partial_links[read_name]
key = tuple(sorted((new_link.refnames[0], new_link.refnames[1])))
if key not in self.links:
self.links[key] = []
new_link.sort()
self.links[key].append(new_link)
else:
self.partial_links[read_name] = new_link | [
"def",
"update_from_sam",
"(",
"self",
",",
"sam",
",",
"sam_reader",
")",
":",
"if",
"sam",
".",
"is_unmapped",
"or",
"sam",
".",
"mate_is_unmapped",
"or",
"(",
"sam",
".",
"reference_id",
"==",
"sam",
".",
"next_reference_id",
")",
":",
"return",
"new_link",
"=",
"link",
".",
"Link",
"(",
"sam",
",",
"sam_reader",
",",
"self",
".",
"ref_lengths",
")",
"read_name",
"=",
"sam",
".",
"query_name",
"if",
"read_name",
"in",
"self",
".",
"partial_links",
":",
"new_link",
".",
"merge",
"(",
"self",
".",
"partial_links",
"[",
"read_name",
"]",
")",
"del",
"self",
".",
"partial_links",
"[",
"read_name",
"]",
"key",
"=",
"tuple",
"(",
"sorted",
"(",
"(",
"new_link",
".",
"refnames",
"[",
"0",
"]",
",",
"new_link",
".",
"refnames",
"[",
"1",
"]",
")",
")",
")",
"if",
"key",
"not",
"in",
"self",
".",
"links",
":",
"self",
".",
"links",
"[",
"key",
"]",
"=",
"[",
"]",
"new_link",
".",
"sort",
"(",
")",
"self",
".",
"links",
"[",
"key",
"]",
".",
"append",
"(",
"new_link",
")",
"else",
":",
"self",
".",
"partial_links",
"[",
"read_name",
"]",
"=",
"new_link"
]
| Updates graph info from a pysam.AlignedSegment object | [
"Updates",
"graph",
"info",
"from",
"a",
"pysam",
".",
"AlignedSegment",
"object"
]
| python | train | 38.8 |
thespacedoctor/qubits | qubits/universe.py | https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/universe.py#L484-L542 | def build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory,
plot=True):
"""
*Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import yaml
import numpy as np
## LOCAL APPLICATION ##
################ >ACTION(S) ################
dataDir = pathToOutputDirectory + "/k_corrections/"
filters = ['g', 'r', 'i', 'z']
fileName = pathToOutputDirectory + "/transient_light_curves.yaml"
stream = file(fileName, 'r')
generatedLCs = yaml.load(stream)
models = generatedLCs.keys()
kCorList = []
for i in range(len(redshiftArray)):
redshift = redshiftArray[i]
kCorDict = {}
for model in models:
for ffilter in filters:
filterDir = dataDir + model + "/" + ffilter
strRed = "%0.3f" % (redshift,)
fileName = filterDir + "/z" + \
str(strRed).replace(".", "pt") + "_poly.yaml"
try:
stream = file(fileName, 'r')
yamlContent = yaml.load(stream)
# log.info('yamlContent %s' % (yamlContent,))
stream.close()
flatPoly = np.poly1d(yamlContent['polyCoeffs'])
except:
flatPoly = None
kCorDict[ffilter] = flatPoly
kCorList.append(kCorDict)
kCorArray = np.array(kCorList)
return kCorArray | [
"def",
"build_kcorrection_array",
"(",
"log",
",",
"redshiftArray",
",",
"snTypesArray",
",",
"snLightCurves",
",",
"pathToOutputDirectory",
",",
"plot",
"=",
"True",
")",
":",
"################ > IMPORTS ################",
"## STANDARD LIB ##",
"## THIRD PARTY ##",
"import",
"yaml",
"import",
"numpy",
"as",
"np",
"## LOCAL APPLICATION ##",
"################ >ACTION(S) ################",
"dataDir",
"=",
"pathToOutputDirectory",
"+",
"\"/k_corrections/\"",
"filters",
"=",
"[",
"'g'",
",",
"'r'",
",",
"'i'",
",",
"'z'",
"]",
"fileName",
"=",
"pathToOutputDirectory",
"+",
"\"/transient_light_curves.yaml\"",
"stream",
"=",
"file",
"(",
"fileName",
",",
"'r'",
")",
"generatedLCs",
"=",
"yaml",
".",
"load",
"(",
"stream",
")",
"models",
"=",
"generatedLCs",
".",
"keys",
"(",
")",
"kCorList",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"redshiftArray",
")",
")",
":",
"redshift",
"=",
"redshiftArray",
"[",
"i",
"]",
"kCorDict",
"=",
"{",
"}",
"for",
"model",
"in",
"models",
":",
"for",
"ffilter",
"in",
"filters",
":",
"filterDir",
"=",
"dataDir",
"+",
"model",
"+",
"\"/\"",
"+",
"ffilter",
"strRed",
"=",
"\"%0.3f\"",
"%",
"(",
"redshift",
",",
")",
"fileName",
"=",
"filterDir",
"+",
"\"/z\"",
"+",
"str",
"(",
"strRed",
")",
".",
"replace",
"(",
"\".\"",
",",
"\"pt\"",
")",
"+",
"\"_poly.yaml\"",
"try",
":",
"stream",
"=",
"file",
"(",
"fileName",
",",
"'r'",
")",
"yamlContent",
"=",
"yaml",
".",
"load",
"(",
"stream",
")",
"# log.info('yamlContent %s' % (yamlContent,))",
"stream",
".",
"close",
"(",
")",
"flatPoly",
"=",
"np",
".",
"poly1d",
"(",
"yamlContent",
"[",
"'polyCoeffs'",
"]",
")",
"except",
":",
"flatPoly",
"=",
"None",
"kCorDict",
"[",
"ffilter",
"]",
"=",
"flatPoly",
"kCorList",
".",
"append",
"(",
"kCorDict",
")",
"kCorArray",
"=",
"np",
".",
"array",
"(",
"kCorList",
")",
"return",
"kCorArray"
]
| *Given the random redshiftArray and snTypeArray, generate a dictionary of k-correction polynomials (one for each filter) for every object.*
**Key Arguments:**
- ``log`` -- logger
- ``redshiftArray`` -- the pre-generated redshift array
- ``snTypesArray`` -- the pre-generated array of random sn types
- ``snLightCurves`` -- yaml style dictionary of SN lightcurve info
- ``pathToOutputDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- None | [
"*",
"Given",
"the",
"random",
"redshiftArray",
"and",
"snTypeArray",
"generate",
"a",
"dictionary",
"of",
"k",
"-",
"correction",
"polynomials",
"(",
"one",
"for",
"each",
"filter",
")",
"for",
"every",
"object",
".",
"*"
]
| python | train | 34.881356 |
dpgaspar/Flask-AppBuilder | flask_appbuilder/console.py | https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/console.py#L247-L258 | def list_users(app, appbuilder):
"""
List all users on the database
"""
_appbuilder = import_application(app, appbuilder)
echo_header("List of users")
for user in _appbuilder.sm.get_all_users():
click.echo(
"username:{0} | email:{1} | role:{2}".format(
user.username, user.email, user.roles
)
) | [
"def",
"list_users",
"(",
"app",
",",
"appbuilder",
")",
":",
"_appbuilder",
"=",
"import_application",
"(",
"app",
",",
"appbuilder",
")",
"echo_header",
"(",
"\"List of users\"",
")",
"for",
"user",
"in",
"_appbuilder",
".",
"sm",
".",
"get_all_users",
"(",
")",
":",
"click",
".",
"echo",
"(",
"\"username:{0} | email:{1} | role:{2}\"",
".",
"format",
"(",
"user",
".",
"username",
",",
"user",
".",
"email",
",",
"user",
".",
"roles",
")",
")"
]
| List all users on the database | [
"List",
"all",
"users",
"on",
"the",
"database"
]
| python | train | 30.583333 |
vintasoftware/django-role-permissions | rolepermissions/roles.py | https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/roles.py#L200-L207 | def get_user_roles(user):
"""Get a list of a users's roles."""
if user:
groups = user.groups.all() # Important! all() query may be cached on User with prefetch_related.
roles = (RolesManager.retrieve_role(group.name) for group in groups if group.name in RolesManager.get_roles_names())
return sorted(roles, key=lambda r: r.get_name() )
else:
return [] | [
"def",
"get_user_roles",
"(",
"user",
")",
":",
"if",
"user",
":",
"groups",
"=",
"user",
".",
"groups",
".",
"all",
"(",
")",
"# Important! all() query may be cached on User with prefetch_related.",
"roles",
"=",
"(",
"RolesManager",
".",
"retrieve_role",
"(",
"group",
".",
"name",
")",
"for",
"group",
"in",
"groups",
"if",
"group",
".",
"name",
"in",
"RolesManager",
".",
"get_roles_names",
"(",
")",
")",
"return",
"sorted",
"(",
"roles",
",",
"key",
"=",
"lambda",
"r",
":",
"r",
".",
"get_name",
"(",
")",
")",
"else",
":",
"return",
"[",
"]"
]
| Get a list of a users's roles. | [
"Get",
"a",
"list",
"of",
"a",
"users",
"s",
"roles",
"."
]
| python | train | 48.75 |
refenv/cijoe | deprecated/modules/cij/nvme.py | https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/deprecated/modules/cij/nvme.py#L40-L100 | def env():
"""Verify NVME variables and construct exported variables"""
if cij.ssh.env():
cij.err("cij.nvme.env: invalid SSH environment")
return 1
nvme = cij.env_to_dict(PREFIX, REQUIRED)
nvme["DEV_PATH"] = os.path.join("/dev", nvme["DEV_NAME"])
# get version, chunks, luns and chs
try:
sysfs = os.path.join("/sys/class/block", nvme["DEV_NAME"], "lightnvm")
nvme["LNVM_VERSION"] = cat_file(os.path.join(sysfs, "version"))
if nvme["LNVM_VERSION"] == "2.0":
luns = "punits"
chs = "groups"
elif nvme["LNVM_VERSION"] == "1.2":
luns = "num_luns"
chs = "num_channels"
else:
raise RuntimeError("cij.nvme.env: invalid lnvm version: %s" % nvme["LNVM_VERSION"])
nvme["LNVM_NUM_CHUNKS"] = cat_file(os.path.join(sysfs, "chunks"))
nvme["LNVM_NUM_LUNS"] = cat_file(os.path.join(sysfs, luns))
nvme["LNVM_NUM_CHS"] = cat_file(os.path.join(sysfs, chs))
nvme["LNVM_TOTAL_LUNS"] = str(int(nvme["LNVM_NUM_LUNS"]) * int(nvme["LNVM_NUM_CHS"]))
nvme["LNVM_TOTAL_CHUNKS"] = str(int(nvme["LNVM_TOTAL_LUNS"]) * int(nvme["LNVM_NUM_CHUNKS"]))
# get spec version by identify namespace data struct
if nvme["LNVM_VERSION"] == "2.0":
cmd = ["nvme", "id-ctrl", nvme["DEV_PATH"], "--raw-binary"]
status, stdout, _ = cij.ssh.command(cmd, shell=True)
if status:
raise RuntimeError("cij.nvme.env: nvme id-ctrl fail")
buff = cij.bin.Buffer(types=IdentifyCDS, length=1)
buff.memcopy(stdout)
if buff[0].VS[1023] == 0x5a:
nvme["SPEC_VERSION"] = "Denali"
else:
nvme["SPEC_VERSION"] = "Spec20"
else:
nvme["SPEC_VERSION"] = "Spec12"
# get chunk meta information
nvme["LNVM_CHUNK_META_LENGTH"] = str(get_sizeof_descriptor_table(nvme["SPEC_VERSION"]))
nvme["LNVM_CHUNK_META_SIZE"] = str(int(nvme["LNVM_CHUNK_META_LENGTH"]) *
int(nvme["LNVM_TOTAL_CHUNKS"]))
except StandardError:
traceback.print_exc()
return 1
cij.env_export(PREFIX, EXPORTED, nvme)
return 0 | [
"def",
"env",
"(",
")",
":",
"if",
"cij",
".",
"ssh",
".",
"env",
"(",
")",
":",
"cij",
".",
"err",
"(",
"\"cij.nvme.env: invalid SSH environment\"",
")",
"return",
"1",
"nvme",
"=",
"cij",
".",
"env_to_dict",
"(",
"PREFIX",
",",
"REQUIRED",
")",
"nvme",
"[",
"\"DEV_PATH\"",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"/dev\"",
",",
"nvme",
"[",
"\"DEV_NAME\"",
"]",
")",
"# get version, chunks, luns and chs",
"try",
":",
"sysfs",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"/sys/class/block\"",
",",
"nvme",
"[",
"\"DEV_NAME\"",
"]",
",",
"\"lightnvm\"",
")",
"nvme",
"[",
"\"LNVM_VERSION\"",
"]",
"=",
"cat_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sysfs",
",",
"\"version\"",
")",
")",
"if",
"nvme",
"[",
"\"LNVM_VERSION\"",
"]",
"==",
"\"2.0\"",
":",
"luns",
"=",
"\"punits\"",
"chs",
"=",
"\"groups\"",
"elif",
"nvme",
"[",
"\"LNVM_VERSION\"",
"]",
"==",
"\"1.2\"",
":",
"luns",
"=",
"\"num_luns\"",
"chs",
"=",
"\"num_channels\"",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"cij.nvme.env: invalid lnvm version: %s\"",
"%",
"nvme",
"[",
"\"LNVM_VERSION\"",
"]",
")",
"nvme",
"[",
"\"LNVM_NUM_CHUNKS\"",
"]",
"=",
"cat_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sysfs",
",",
"\"chunks\"",
")",
")",
"nvme",
"[",
"\"LNVM_NUM_LUNS\"",
"]",
"=",
"cat_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sysfs",
",",
"luns",
")",
")",
"nvme",
"[",
"\"LNVM_NUM_CHS\"",
"]",
"=",
"cat_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sysfs",
",",
"chs",
")",
")",
"nvme",
"[",
"\"LNVM_TOTAL_LUNS\"",
"]",
"=",
"str",
"(",
"int",
"(",
"nvme",
"[",
"\"LNVM_NUM_LUNS\"",
"]",
")",
"*",
"int",
"(",
"nvme",
"[",
"\"LNVM_NUM_CHS\"",
"]",
")",
")",
"nvme",
"[",
"\"LNVM_TOTAL_CHUNKS\"",
"]",
"=",
"str",
"(",
"int",
"(",
"nvme",
"[",
"\"LNVM_TOTAL_LUNS\"",
"]",
")",
"*",
"int",
"(",
"nvme",
"[",
"\"LNVM_NUM_CHUNKS\"",
"]",
")",
")",
"# get spec version by identify namespace data struct",
"if",
"nvme",
"[",
"\"LNVM_VERSION\"",
"]",
"==",
"\"2.0\"",
":",
"cmd",
"=",
"[",
"\"nvme\"",
",",
"\"id-ctrl\"",
",",
"nvme",
"[",
"\"DEV_PATH\"",
"]",
",",
"\"--raw-binary\"",
"]",
"status",
",",
"stdout",
",",
"_",
"=",
"cij",
".",
"ssh",
".",
"command",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"if",
"status",
":",
"raise",
"RuntimeError",
"(",
"\"cij.nvme.env: nvme id-ctrl fail\"",
")",
"buff",
"=",
"cij",
".",
"bin",
".",
"Buffer",
"(",
"types",
"=",
"IdentifyCDS",
",",
"length",
"=",
"1",
")",
"buff",
".",
"memcopy",
"(",
"stdout",
")",
"if",
"buff",
"[",
"0",
"]",
".",
"VS",
"[",
"1023",
"]",
"==",
"0x5a",
":",
"nvme",
"[",
"\"SPEC_VERSION\"",
"]",
"=",
"\"Denali\"",
"else",
":",
"nvme",
"[",
"\"SPEC_VERSION\"",
"]",
"=",
"\"Spec20\"",
"else",
":",
"nvme",
"[",
"\"SPEC_VERSION\"",
"]",
"=",
"\"Spec12\"",
"# get chunk meta information",
"nvme",
"[",
"\"LNVM_CHUNK_META_LENGTH\"",
"]",
"=",
"str",
"(",
"get_sizeof_descriptor_table",
"(",
"nvme",
"[",
"\"SPEC_VERSION\"",
"]",
")",
")",
"nvme",
"[",
"\"LNVM_CHUNK_META_SIZE\"",
"]",
"=",
"str",
"(",
"int",
"(",
"nvme",
"[",
"\"LNVM_CHUNK_META_LENGTH\"",
"]",
")",
"*",
"int",
"(",
"nvme",
"[",
"\"LNVM_TOTAL_CHUNKS\"",
"]",
")",
")",
"except",
"StandardError",
":",
"traceback",
".",
"print_exc",
"(",
")",
"return",
"1",
"cij",
".",
"env_export",
"(",
"PREFIX",
",",
"EXPORTED",
",",
"nvme",
")",
"return",
"0"
]
| Verify NVME variables and construct exported variables | [
"Verify",
"NVME",
"variables",
"and",
"construct",
"exported",
"variables"
]
| python | valid | 36.213115 |
dls-controls/pymalcolm | malcolm/core/concurrency.py | https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/concurrency.py#L61-L67 | def get(self, timeout=None):
# type: (float) -> T
"""Return the result or raise the error the function has produced"""
self.wait(timeout)
if isinstance(self._result, Exception):
raise self._result
return self._result | [
"def",
"get",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"# type: (float) -> T",
"self",
".",
"wait",
"(",
"timeout",
")",
"if",
"isinstance",
"(",
"self",
".",
"_result",
",",
"Exception",
")",
":",
"raise",
"self",
".",
"_result",
"return",
"self",
".",
"_result"
]
| Return the result or raise the error the function has produced | [
"Return",
"the",
"result",
"or",
"raise",
"the",
"error",
"the",
"function",
"has",
"produced"
]
| python | train | 37.428571 |
pybel/pybel | src/pybel/struct/graph.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L558-L560 | def has_edge_evidence(self, u: BaseEntity, v: BaseEntity, key: str) -> bool:
"""Check if the given edge has an evidence."""
return self._has_edge_attr(u, v, key, EVIDENCE) | [
"def",
"has_edge_evidence",
"(",
"self",
",",
"u",
":",
"BaseEntity",
",",
"v",
":",
"BaseEntity",
",",
"key",
":",
"str",
")",
"->",
"bool",
":",
"return",
"self",
".",
"_has_edge_attr",
"(",
"u",
",",
"v",
",",
"key",
",",
"EVIDENCE",
")"
]
| Check if the given edge has an evidence. | [
"Check",
"if",
"the",
"given",
"edge",
"has",
"an",
"evidence",
"."
]
| python | train | 61.666667 |
sassoo/goldman | goldman/middleware/security/__init__.py | https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/middleware/security/__init__.py#L21-L28 | def process_request(self, req, resp):
""" Process the request before routing it.
We always enforce the use of SSL.
"""
if goldman.config.TLS_REQUIRED and req.protocol != 'https':
abort(TLSRequired) | [
"def",
"process_request",
"(",
"self",
",",
"req",
",",
"resp",
")",
":",
"if",
"goldman",
".",
"config",
".",
"TLS_REQUIRED",
"and",
"req",
".",
"protocol",
"!=",
"'https'",
":",
"abort",
"(",
"TLSRequired",
")"
]
| Process the request before routing it.
We always enforce the use of SSL. | [
"Process",
"the",
"request",
"before",
"routing",
"it",
"."
]
| python | train | 29.5 |
rigetti/quantumflow | quantumflow/qubits.py | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/qubits.py#L233-L242 | def inner_product(vec0: QubitVector, vec1: QubitVector) -> bk.BKTensor:
""" Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match.
"""
if vec0.rank != vec1.rank or vec0.qubit_nb != vec1.qubit_nb:
raise ValueError('Incompatibly vectors. Qubits and rank must match')
vec1 = vec1.permute(vec0.qubits) # Make sure qubits in same order
return bk.inner(vec0.tensor, vec1.tensor) | [
"def",
"inner_product",
"(",
"vec0",
":",
"QubitVector",
",",
"vec1",
":",
"QubitVector",
")",
"->",
"bk",
".",
"BKTensor",
":",
"if",
"vec0",
".",
"rank",
"!=",
"vec1",
".",
"rank",
"or",
"vec0",
".",
"qubit_nb",
"!=",
"vec1",
".",
"qubit_nb",
":",
"raise",
"ValueError",
"(",
"'Incompatibly vectors. Qubits and rank must match'",
")",
"vec1",
"=",
"vec1",
".",
"permute",
"(",
"vec0",
".",
"qubits",
")",
"# Make sure qubits in same order",
"return",
"bk",
".",
"inner",
"(",
"vec0",
".",
"tensor",
",",
"vec1",
".",
"tensor",
")"
]
| Hilbert-Schmidt inner product between qubit vectors
The tensor rank and qubits must match. | [
"Hilbert",
"-",
"Schmidt",
"inner",
"product",
"between",
"qubit",
"vectors"
]
| python | train | 43.4 |
mabuchilab/QNET | src/qnet/algebra/core/matrix_algebra.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/matrix_algebra.py#L388-L404 | def block_matrix(A, B, C, D):
r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A B \\ C D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``.
"""
return vstackm((hstackm((A, B)), hstackm((C, D)))) | [
"def",
"block_matrix",
"(",
"A",
",",
"B",
",",
"C",
",",
"D",
")",
":",
"return",
"vstackm",
"(",
"(",
"hstackm",
"(",
"(",
"A",
",",
"B",
")",
")",
",",
"hstackm",
"(",
"(",
"C",
",",
"D",
")",
")",
")",
")"
]
| r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A B \\ C D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``. | [
"r",
"Generate",
"the",
"operator",
"matrix",
"with",
"quadrants"
]
| python | train | 27.588235 |
MacHu-GWU/angora-project | angora/math/interp.py | https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/math/interp.py#L265-L279 | def exam_reliability_by_datetime(
datetime_axis, datetime_new_axis, reliable_distance):
"""A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds.
"""
numeric_datetime_axis = [
totimestamp(a_datetime) for a_datetime in datetime_axis
]
numeric_datetime_new_axis = [
totimestamp(a_datetime) for a_datetime in datetime_new_axis
]
return exam_reliability(numeric_datetime_axis, numeric_datetime_new_axis,
reliable_distance, precision=0) | [
"def",
"exam_reliability_by_datetime",
"(",
"datetime_axis",
",",
"datetime_new_axis",
",",
"reliable_distance",
")",
":",
"numeric_datetime_axis",
"=",
"[",
"totimestamp",
"(",
"a_datetime",
")",
"for",
"a_datetime",
"in",
"datetime_axis",
"]",
"numeric_datetime_new_axis",
"=",
"[",
"totimestamp",
"(",
"a_datetime",
")",
"for",
"a_datetime",
"in",
"datetime_new_axis",
"]",
"return",
"exam_reliability",
"(",
"numeric_datetime_axis",
",",
"numeric_datetime_new_axis",
",",
"reliable_distance",
",",
"precision",
"=",
"0",
")"
]
| A datetime-version that takes datetime object list as x_axis
reliable_distance equals to the time difference in seconds. | [
"A",
"datetime",
"-",
"version",
"that",
"takes",
"datetime",
"object",
"list",
"as",
"x_axis",
"reliable_distance",
"equals",
"to",
"the",
"time",
"difference",
"in",
"seconds",
"."
]
| python | train | 37.933333 |
SpikeInterface/spikeextractors | spikeextractors/extractors/biocamrecordingextractor/biocamrecordingextractor.py | https://github.com/SpikeInterface/spikeextractors/blob/cbe3b8778a215f0bbd743af8b306856a87e438e1/spikeextractors/extractors/biocamrecordingextractor/biocamrecordingextractor.py#L94-L143 | def openBiocamFile(filename, verbose=False):
"""Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller."""
rf = h5py.File(filename, 'r')
# Read recording variables
recVars = rf.require_group('3BRecInfo/3BRecVars/')
# bitDepth = recVars['BitDepth'].value[0]
# maxV = recVars['MaxVolt'].value[0]
# minV = recVars['MinVolt'].value[0]
nFrames = recVars['NRecFrames'][0]
samplingRate = recVars['SamplingRate'][0]
signalInv = recVars['SignalInversion'][0]
# Read chip variables
chipVars = rf.require_group('3BRecInfo/3BMeaChip/')
nCols = chipVars['NCols'][0]
# Get the actual number of channels used in the recording
file_format = rf['3BData'].attrs.get('Version')
if file_format == 100:
nRecCh = len(rf['3BData/Raw'][0])
elif file_format == 101:
nRecCh = int(1. * rf['3BData/Raw'].shape[0] / nFrames)
else:
raise Exception('Unknown data file format.')
if verbose:
print('# 3Brain data format:', file_format, 'signal inversion', signalInv)
print('# signal range: ', recVars['MinVolt'][0], '- ', recVars['MaxVolt'][0])
print('# channels: ', nRecCh)
print('# frames: ', nFrames)
print('# sampling rate: ', samplingRate)
# get channel locations
r = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Row']
c = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Col']
rawIndices = np.vstack((r, c)).T
# assign channel numbers
chIndices = np.array([(x - 1) + (y - 1) * nCols for (y, x) in rawIndices])
# determine correct function to read data
if verbose:
print("# Signal inversion looks like " + str(signalInv) + ", guessing correct method for data access.")
print("# If your results look wrong, signal polarity is may be wrong.")
if file_format == 100:
if signalInv == -1:
read_function = readHDF5t_100
else:
read_function = readHDF5t_100_i
else:
if signalInv == -1:
read_function = readHDF5t_101_i
else:
read_function = readHDF5t_101
return (rf, nFrames, samplingRate, nRecCh, chIndices, file_format, signalInv, rawIndices, read_function) | [
"def",
"openBiocamFile",
"(",
"filename",
",",
"verbose",
"=",
"False",
")",
":",
"rf",
"=",
"h5py",
".",
"File",
"(",
"filename",
",",
"'r'",
")",
"# Read recording variables",
"recVars",
"=",
"rf",
".",
"require_group",
"(",
"'3BRecInfo/3BRecVars/'",
")",
"# bitDepth = recVars['BitDepth'].value[0]",
"# maxV = recVars['MaxVolt'].value[0]",
"# minV = recVars['MinVolt'].value[0]",
"nFrames",
"=",
"recVars",
"[",
"'NRecFrames'",
"]",
"[",
"0",
"]",
"samplingRate",
"=",
"recVars",
"[",
"'SamplingRate'",
"]",
"[",
"0",
"]",
"signalInv",
"=",
"recVars",
"[",
"'SignalInversion'",
"]",
"[",
"0",
"]",
"# Read chip variables",
"chipVars",
"=",
"rf",
".",
"require_group",
"(",
"'3BRecInfo/3BMeaChip/'",
")",
"nCols",
"=",
"chipVars",
"[",
"'NCols'",
"]",
"[",
"0",
"]",
"# Get the actual number of channels used in the recording",
"file_format",
"=",
"rf",
"[",
"'3BData'",
"]",
".",
"attrs",
".",
"get",
"(",
"'Version'",
")",
"if",
"file_format",
"==",
"100",
":",
"nRecCh",
"=",
"len",
"(",
"rf",
"[",
"'3BData/Raw'",
"]",
"[",
"0",
"]",
")",
"elif",
"file_format",
"==",
"101",
":",
"nRecCh",
"=",
"int",
"(",
"1.",
"*",
"rf",
"[",
"'3BData/Raw'",
"]",
".",
"shape",
"[",
"0",
"]",
"/",
"nFrames",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown data file format.'",
")",
"if",
"verbose",
":",
"print",
"(",
"'# 3Brain data format:'",
",",
"file_format",
",",
"'signal inversion'",
",",
"signalInv",
")",
"print",
"(",
"'# signal range: '",
",",
"recVars",
"[",
"'MinVolt'",
"]",
"[",
"0",
"]",
",",
"'- '",
",",
"recVars",
"[",
"'MaxVolt'",
"]",
"[",
"0",
"]",
")",
"print",
"(",
"'# channels: '",
",",
"nRecCh",
")",
"print",
"(",
"'# frames: '",
",",
"nFrames",
")",
"print",
"(",
"'# sampling rate: '",
",",
"samplingRate",
")",
"# get channel locations",
"r",
"=",
"rf",
"[",
"'3BRecInfo/3BMeaStreams/Raw/Chs'",
"]",
"[",
"(",
")",
"]",
"[",
"'Row'",
"]",
"c",
"=",
"rf",
"[",
"'3BRecInfo/3BMeaStreams/Raw/Chs'",
"]",
"[",
"(",
")",
"]",
"[",
"'Col'",
"]",
"rawIndices",
"=",
"np",
".",
"vstack",
"(",
"(",
"r",
",",
"c",
")",
")",
".",
"T",
"# assign channel numbers",
"chIndices",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"x",
"-",
"1",
")",
"+",
"(",
"y",
"-",
"1",
")",
"*",
"nCols",
"for",
"(",
"y",
",",
"x",
")",
"in",
"rawIndices",
"]",
")",
"# determine correct function to read data",
"if",
"verbose",
":",
"print",
"(",
"\"# Signal inversion looks like \"",
"+",
"str",
"(",
"signalInv",
")",
"+",
"\", guessing correct method for data access.\"",
")",
"print",
"(",
"\"# If your results look wrong, signal polarity is may be wrong.\"",
")",
"if",
"file_format",
"==",
"100",
":",
"if",
"signalInv",
"==",
"-",
"1",
":",
"read_function",
"=",
"readHDF5t_100",
"else",
":",
"read_function",
"=",
"readHDF5t_100_i",
"else",
":",
"if",
"signalInv",
"==",
"-",
"1",
":",
"read_function",
"=",
"readHDF5t_101_i",
"else",
":",
"read_function",
"=",
"readHDF5t_101",
"return",
"(",
"rf",
",",
"nFrames",
",",
"samplingRate",
",",
"nRecCh",
",",
"chIndices",
",",
"file_format",
",",
"signalInv",
",",
"rawIndices",
",",
"read_function",
")"
]
| Open a Biocam hdf5 file, read and return the recording info, pick te correct method to access raw data, and return this to the caller. | [
"Open",
"a",
"Biocam",
"hdf5",
"file",
"read",
"and",
"return",
"the",
"recording",
"info",
"pick",
"te",
"correct",
"method",
"to",
"access",
"raw",
"data",
"and",
"return",
"this",
"to",
"the",
"caller",
"."
]
| python | train | 44.58 |
sveetch/boussole | boussole/watcher.py | https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/watcher.py#L143-L161 | def compile_dependencies(self, sourcepath, include_self=False):
"""
Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is add to
items to compile, else only its dependencies are compiled.
"""
items = self.inspector.parents(sourcepath)
# Also add the current event related path
if include_self:
items.add(sourcepath)
return filter(None, [self.compile_source(item) for item in items]) | [
"def",
"compile_dependencies",
"(",
"self",
",",
"sourcepath",
",",
"include_self",
"=",
"False",
")",
":",
"items",
"=",
"self",
".",
"inspector",
".",
"parents",
"(",
"sourcepath",
")",
"# Also add the current event related path",
"if",
"include_self",
":",
"items",
".",
"add",
"(",
"sourcepath",
")",
"return",
"filter",
"(",
"None",
",",
"[",
"self",
".",
"compile_source",
"(",
"item",
")",
"for",
"item",
"in",
"items",
"]",
")"
]
| Apply compile on all dependencies
Args:
sourcepath (string): Sass source path to compile to its
destination using project settings.
Keyword Arguments:
include_self (bool): If ``True`` the given sourcepath is add to
items to compile, else only its dependencies are compiled. | [
"Apply",
"compile",
"on",
"all",
"dependencies"
]
| python | train | 34.842105 |
TheHive-Project/Cortex-Analyzers | analyzers/SoltraEdge/soltra.py | https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/SoltraEdge/soltra.py#L92-L116 | def pop_object(self, element):
'''
Pop the object element if the object contains an higher TLP then allowed.
'''
redacted_text = "Redacted. Object contained TLP value higher than allowed."
element['id'] = ''
element['url'] = ''
element['type'] = ''
element['tags'] = []
element['etlp'] = None
element['title'] = redacted_text
element['tlpColor'] = element['tlpColor']
element['uploaded_on'] = ''
element['uploaded_by'] = ''
element['description'] = redacted_text
element['children_types'] = []
element['summary']['type'] = ''
element['summary']['value'] = ''
element['summary']['title'] = redacted_text
element['summary']['description'] = redacted_text
return element | [
"def",
"pop_object",
"(",
"self",
",",
"element",
")",
":",
"redacted_text",
"=",
"\"Redacted. Object contained TLP value higher than allowed.\"",
"element",
"[",
"'id'",
"]",
"=",
"''",
"element",
"[",
"'url'",
"]",
"=",
"''",
"element",
"[",
"'type'",
"]",
"=",
"''",
"element",
"[",
"'tags'",
"]",
"=",
"[",
"]",
"element",
"[",
"'etlp'",
"]",
"=",
"None",
"element",
"[",
"'title'",
"]",
"=",
"redacted_text",
"element",
"[",
"'tlpColor'",
"]",
"=",
"element",
"[",
"'tlpColor'",
"]",
"element",
"[",
"'uploaded_on'",
"]",
"=",
"''",
"element",
"[",
"'uploaded_by'",
"]",
"=",
"''",
"element",
"[",
"'description'",
"]",
"=",
"redacted_text",
"element",
"[",
"'children_types'",
"]",
"=",
"[",
"]",
"element",
"[",
"'summary'",
"]",
"[",
"'type'",
"]",
"=",
"''",
"element",
"[",
"'summary'",
"]",
"[",
"'value'",
"]",
"=",
"''",
"element",
"[",
"'summary'",
"]",
"[",
"'title'",
"]",
"=",
"redacted_text",
"element",
"[",
"'summary'",
"]",
"[",
"'description'",
"]",
"=",
"redacted_text",
"return",
"element"
]
| Pop the object element if the object contains an higher TLP then allowed. | [
"Pop",
"the",
"object",
"element",
"if",
"the",
"object",
"contains",
"an",
"higher",
"TLP",
"then",
"allowed",
"."
]
| python | train | 32.28 |
CI-WATER/gsshapy | gsshapy/modeling/framework.py | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/framework.py#L535-L674 | def run(self, subdirectory=None):
"""
Write out project file and run GSSHA simulation
"""
with tmp_chdir(self.gssha_directory):
if self.hotstart_minimal_mode:
# remove all optional output cards
for gssha_optional_output_card in self.GSSHA_OPTIONAL_OUTPUT_CARDS:
self._delete_card(gssha_optional_output_card)
# make sure running in SUPER_QUIET mode
self._update_card('SUPER_QUIET', '')
if subdirectory is None:
# give execute folder name
subdirectory = "minimal_hotstart_run_{0}to{1}" \
.format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
else:
# give execute folder name
subdirectory = "run_{0}to{1}".format(self.event_manager.simulation_start.strftime("%Y%m%d%H%M"),
self.event_manager.simulation_end.strftime("%Y%m%d%H%M"))
# ensure unique folder naming conventions and add to exisitng event manager
prj_evt_manager = self.project_manager.projectFileEventManager
prj_event = prj_evt_manager.add_event(name=subdirectory,
subfolder=subdirectory,
session=self.db_session)
eventyml_path = self.project_manager.getCard('#GSSHAPY_EVENT_YML') \
.value.strip("'").strip('"')
prj_evt_manager.write(session=self.db_session,
directory=self.gssha_directory,
name=os.path.basename(eventyml_path))
# ensure event manager not propagated to child event
self.project_manager.deleteCard('#GSSHAPY_EVENT_YML',
db_session=self.db_session)
self.db_session.delete(self.project_manager.projectFileEventManager)
self.db_session.commit()
# make working directory
working_directory = os.path.join(self.gssha_directory, prj_event.subfolder)
try:
os.mkdir(working_directory)
except OSError:
pass
# move simulation generated files to working directory
# PRECIP_FILE, HMET_NETCDF, HMET_ASCII, CHAN_POINT_INPUT
# TODO: Move HMET_ASCII files
for sim_card in self.simulation_modified_input_cards:
if sim_card != 'MAPPING_TABLE':
self._update_card_file_location(sim_card, working_directory)
mapping_table_card = self.project_manager.getCard('MAPPING_TABLE')
if mapping_table_card:
# read in mapping table
map_table_object = self.project_manager.readInputFile('MAPPING_TABLE',
self.gssha_directory,
self.db_session,
readIndexMaps=False)
# connect index maps to main gssha directory
for indexMap in map_table_object.indexMaps:
indexMap.filename = os.path.join("..", os.path.basename(indexMap.filename))
# write copy of mapping table to working directory
map_table_filename = os.path.basename(mapping_table_card.value.strip("'").strip('"'))
map_table_object.write(session=self.db_session,
directory=working_directory,
name=map_table_filename,
writeIndexMaps=False)
# connect to other output files in main gssha directory
for gssha_card in self.project_manager.projectCards:
if gssha_card.name not in self.GSSHA_REQUIRED_OUTPUT_PATH_CARDS + \
self.GSSHA_OPTIONAL_OUTPUT_PATH_CARDS + \
tuple(self.simulation_modified_input_cards):
if gssha_card.value:
updated_value = gssha_card.value.strip('"').strip("'")
if updated_value:
if gssha_card.name == "READ_CHAN_HOTSTART":
# there are two required files
# the .dht and .qht
if os.path.exists(updated_value + '.dht') \
and os.path.exists(updated_value + '.qht'):
updated_path = os.path.join("..", os.path.basename(updated_value))
gssha_card.value = '"{0}"'.format(updated_path)
elif os.path.exists(updated_value):
updated_path = os.path.join("..", os.path.basename(updated_value))
gssha_card.value = '"{0}"'.format(updated_path)
elif gssha_card.name == '#INDEXGRID_GUID':
path_split = updated_value.split()
updated_path = os.path.basename(path_split[0].strip('"').strip("'"))
if os.path.exists(updated_path):
new_path = os.path.join("..", os.path.basename(updated_path))
try:
# Get WMS ID for Index Map as part of value
gssha_card.value = '"{0}" "{1}"'.format(new_path, path_split[1])
except:
# Like normal if the ID isn't there
gssha_card.value = '"{0}"'.format(new_path)
else:
log.warning("{0} {1} not found in project directory ...".format("#INDEXGRID_GUID", updated_path))
# make sure project path is ""
self._update_card("PROJECT_PATH", "", True)
# WRITE OUT UPDATED GSSHA PROJECT FILE
self.project_manager.write(session=self.db_session,
directory=working_directory,
name=self.project_manager.name)
with tmp_chdir(working_directory):
# RUN SIMULATION
if self.gssha_executable and find_executable(self.gssha_executable) is not None:
log.info("Running GSSHA simulation ...")
try:
run_gssha_command = [self.gssha_executable,
os.path.join(working_directory, self.project_filename)]
# run GSSHA
out = subprocess.check_output(run_gssha_command)
# write out GSSHA output
log_file_path = os.path.join(working_directory, 'simulation.log')
with open(log_file_path, mode='w') as logfile:
logfile.write(out.decode('utf-8'))
# log to other logger if debug mode on
if log.isEnabledFor(logging.DEBUG):
for line in out.split(b'\n'):
log.debug(line.decode('utf-8'))
except subprocess.CalledProcessError as ex:
log.error("{0}: {1}".format(ex.returncode, ex.output))
else:
missing_exe_error = ("GSSHA executable not found. "
"Skipping GSSHA simulation run ...")
log.error(missing_exe_error)
raise ValueError(missing_exe_error)
return working_directory | [
"def",
"run",
"(",
"self",
",",
"subdirectory",
"=",
"None",
")",
":",
"with",
"tmp_chdir",
"(",
"self",
".",
"gssha_directory",
")",
":",
"if",
"self",
".",
"hotstart_minimal_mode",
":",
"# remove all optional output cards",
"for",
"gssha_optional_output_card",
"in",
"self",
".",
"GSSHA_OPTIONAL_OUTPUT_CARDS",
":",
"self",
".",
"_delete_card",
"(",
"gssha_optional_output_card",
")",
"# make sure running in SUPER_QUIET mode",
"self",
".",
"_update_card",
"(",
"'SUPER_QUIET'",
",",
"''",
")",
"if",
"subdirectory",
"is",
"None",
":",
"# give execute folder name",
"subdirectory",
"=",
"\"minimal_hotstart_run_{0}to{1}\"",
".",
"format",
"(",
"self",
".",
"event_manager",
".",
"simulation_start",
".",
"strftime",
"(",
"\"%Y%m%d%H%M\"",
")",
",",
"self",
".",
"event_manager",
".",
"simulation_end",
".",
"strftime",
"(",
"\"%Y%m%d%H%M\"",
")",
")",
"else",
":",
"# give execute folder name",
"subdirectory",
"=",
"\"run_{0}to{1}\"",
".",
"format",
"(",
"self",
".",
"event_manager",
".",
"simulation_start",
".",
"strftime",
"(",
"\"%Y%m%d%H%M\"",
")",
",",
"self",
".",
"event_manager",
".",
"simulation_end",
".",
"strftime",
"(",
"\"%Y%m%d%H%M\"",
")",
")",
"# ensure unique folder naming conventions and add to exisitng event manager",
"prj_evt_manager",
"=",
"self",
".",
"project_manager",
".",
"projectFileEventManager",
"prj_event",
"=",
"prj_evt_manager",
".",
"add_event",
"(",
"name",
"=",
"subdirectory",
",",
"subfolder",
"=",
"subdirectory",
",",
"session",
"=",
"self",
".",
"db_session",
")",
"eventyml_path",
"=",
"self",
".",
"project_manager",
".",
"getCard",
"(",
"'#GSSHAPY_EVENT_YML'",
")",
".",
"value",
".",
"strip",
"(",
"\"'\"",
")",
".",
"strip",
"(",
"'\"'",
")",
"prj_evt_manager",
".",
"write",
"(",
"session",
"=",
"self",
".",
"db_session",
",",
"directory",
"=",
"self",
".",
"gssha_directory",
",",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"eventyml_path",
")",
")",
"# ensure event manager not propagated to child event",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"'#GSSHAPY_EVENT_YML'",
",",
"db_session",
"=",
"self",
".",
"db_session",
")",
"self",
".",
"db_session",
".",
"delete",
"(",
"self",
".",
"project_manager",
".",
"projectFileEventManager",
")",
"self",
".",
"db_session",
".",
"commit",
"(",
")",
"# make working directory",
"working_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gssha_directory",
",",
"prj_event",
".",
"subfolder",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"working_directory",
")",
"except",
"OSError",
":",
"pass",
"# move simulation generated files to working directory",
"# PRECIP_FILE, HMET_NETCDF, HMET_ASCII, CHAN_POINT_INPUT",
"# TODO: Move HMET_ASCII files",
"for",
"sim_card",
"in",
"self",
".",
"simulation_modified_input_cards",
":",
"if",
"sim_card",
"!=",
"'MAPPING_TABLE'",
":",
"self",
".",
"_update_card_file_location",
"(",
"sim_card",
",",
"working_directory",
")",
"mapping_table_card",
"=",
"self",
".",
"project_manager",
".",
"getCard",
"(",
"'MAPPING_TABLE'",
")",
"if",
"mapping_table_card",
":",
"# read in mapping table",
"map_table_object",
"=",
"self",
".",
"project_manager",
".",
"readInputFile",
"(",
"'MAPPING_TABLE'",
",",
"self",
".",
"gssha_directory",
",",
"self",
".",
"db_session",
",",
"readIndexMaps",
"=",
"False",
")",
"# connect index maps to main gssha directory",
"for",
"indexMap",
"in",
"map_table_object",
".",
"indexMaps",
":",
"indexMap",
".",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"indexMap",
".",
"filename",
")",
")",
"# write copy of mapping table to working directory",
"map_table_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"mapping_table_card",
".",
"value",
".",
"strip",
"(",
"\"'\"",
")",
".",
"strip",
"(",
"'\"'",
")",
")",
"map_table_object",
".",
"write",
"(",
"session",
"=",
"self",
".",
"db_session",
",",
"directory",
"=",
"working_directory",
",",
"name",
"=",
"map_table_filename",
",",
"writeIndexMaps",
"=",
"False",
")",
"# connect to other output files in main gssha directory",
"for",
"gssha_card",
"in",
"self",
".",
"project_manager",
".",
"projectCards",
":",
"if",
"gssha_card",
".",
"name",
"not",
"in",
"self",
".",
"GSSHA_REQUIRED_OUTPUT_PATH_CARDS",
"+",
"self",
".",
"GSSHA_OPTIONAL_OUTPUT_PATH_CARDS",
"+",
"tuple",
"(",
"self",
".",
"simulation_modified_input_cards",
")",
":",
"if",
"gssha_card",
".",
"value",
":",
"updated_value",
"=",
"gssha_card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
"if",
"updated_value",
":",
"if",
"gssha_card",
".",
"name",
"==",
"\"READ_CHAN_HOTSTART\"",
":",
"# there are two required files",
"# the .dht and .qht",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"updated_value",
"+",
"'.dht'",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"updated_value",
"+",
"'.qht'",
")",
":",
"updated_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"updated_value",
")",
")",
"gssha_card",
".",
"value",
"=",
"'\"{0}\"'",
".",
"format",
"(",
"updated_path",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"updated_value",
")",
":",
"updated_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"updated_value",
")",
")",
"gssha_card",
".",
"value",
"=",
"'\"{0}\"'",
".",
"format",
"(",
"updated_path",
")",
"elif",
"gssha_card",
".",
"name",
"==",
"'#INDEXGRID_GUID'",
":",
"path_split",
"=",
"updated_value",
".",
"split",
"(",
")",
"updated_path",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path_split",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"updated_path",
")",
":",
"new_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"updated_path",
")",
")",
"try",
":",
"# Get WMS ID for Index Map as part of value",
"gssha_card",
".",
"value",
"=",
"'\"{0}\" \"{1}\"'",
".",
"format",
"(",
"new_path",
",",
"path_split",
"[",
"1",
"]",
")",
"except",
":",
"# Like normal if the ID isn't there",
"gssha_card",
".",
"value",
"=",
"'\"{0}\"'",
".",
"format",
"(",
"new_path",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"{0} {1} not found in project directory ...\"",
".",
"format",
"(",
"\"#INDEXGRID_GUID\"",
",",
"updated_path",
")",
")",
"# make sure project path is \"\"",
"self",
".",
"_update_card",
"(",
"\"PROJECT_PATH\"",
",",
"\"\"",
",",
"True",
")",
"# WRITE OUT UPDATED GSSHA PROJECT FILE",
"self",
".",
"project_manager",
".",
"write",
"(",
"session",
"=",
"self",
".",
"db_session",
",",
"directory",
"=",
"working_directory",
",",
"name",
"=",
"self",
".",
"project_manager",
".",
"name",
")",
"with",
"tmp_chdir",
"(",
"working_directory",
")",
":",
"# RUN SIMULATION",
"if",
"self",
".",
"gssha_executable",
"and",
"find_executable",
"(",
"self",
".",
"gssha_executable",
")",
"is",
"not",
"None",
":",
"log",
".",
"info",
"(",
"\"Running GSSHA simulation ...\"",
")",
"try",
":",
"run_gssha_command",
"=",
"[",
"self",
".",
"gssha_executable",
",",
"os",
".",
"path",
".",
"join",
"(",
"working_directory",
",",
"self",
".",
"project_filename",
")",
"]",
"# run GSSHA",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"run_gssha_command",
")",
"# write out GSSHA output",
"log_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"working_directory",
",",
"'simulation.log'",
")",
"with",
"open",
"(",
"log_file_path",
",",
"mode",
"=",
"'w'",
")",
"as",
"logfile",
":",
"logfile",
".",
"write",
"(",
"out",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"# log to other logger if debug mode on",
"if",
"log",
".",
"isEnabledFor",
"(",
"logging",
".",
"DEBUG",
")",
":",
"for",
"line",
"in",
"out",
".",
"split",
"(",
"b'\\n'",
")",
":",
"log",
".",
"debug",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"\"{0}: {1}\"",
".",
"format",
"(",
"ex",
".",
"returncode",
",",
"ex",
".",
"output",
")",
")",
"else",
":",
"missing_exe_error",
"=",
"(",
"\"GSSHA executable not found. \"",
"\"Skipping GSSHA simulation run ...\"",
")",
"log",
".",
"error",
"(",
"missing_exe_error",
")",
"raise",
"ValueError",
"(",
"missing_exe_error",
")",
"return",
"working_directory"
]
| Write out project file and run GSSHA simulation | [
"Write",
"out",
"project",
"file",
"and",
"run",
"GSSHA",
"simulation"
]
| python | train | 57.692857 |
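A minimal usage sketch for the run() method in the record above, assuming an already-configured simulation object (called event here) whose project_manager, db_session and gssha_executable attributes are set up as the enclosing class requires; the object name and subdirectory value are illustrative, not part of the source record.

working_dir = event.run(subdirectory='calibration_run_01')  # writes the project file and runs GSSHA
print('GSSHA outputs written to', working_dir)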
bitcraft/PyTMX | pytmx/pytmx.py | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L667-L673 | def add_tileset(self, tileset):
""" Add a tileset to the map
:param tileset: TiledTileset
"""
assert (isinstance(tileset, TiledTileset))
self.tilesets.append(tileset) | [
"def",
"add_tileset",
"(",
"self",
",",
"tileset",
")",
":",
"assert",
"(",
"isinstance",
"(",
"tileset",
",",
"TiledTileset",
")",
")",
"self",
".",
"tilesets",
".",
"append",
"(",
"tileset",
")"
]
| Add a tileset to the map
:param tileset: TiledTileset | [
"Add",
"a",
"tileset",
"to",
"the",
"map"
]
| python | train | 28.714286 |
weso/CWR-DataApi | cwr/grammar/field/special.py | https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/special.py#L337-L367 | def audio_visual_key(name=None):
"""
Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field
"""
if name is None:
name = 'AVI Field'
society_code = basic.numeric(3)
society_code = society_code.setName('Society Code') \
.setResultsName('society_code')
av_number = basic.alphanum(15, extended=True, isLast=True)
field_empty = pp.Regex('[ ]{15}')
field_empty.setParseAction(pp.replaceWith(''))
av_number = av_number | field_empty
av_number = av_number.setName('Audio-Visual Number') \
.setResultsName('av_number')
field = pp.Group(society_code + pp.Optional(av_number))
field.setParseAction(lambda v: _to_avi(v[0]))
field = field.setName(name)
return field.setResultsName('audio_visual_key') | [
"def",
"audio_visual_key",
"(",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"'AVI Field'",
"society_code",
"=",
"basic",
".",
"numeric",
"(",
"3",
")",
"society_code",
"=",
"society_code",
".",
"setName",
"(",
"'Society Code'",
")",
".",
"setResultsName",
"(",
"'society_code'",
")",
"av_number",
"=",
"basic",
".",
"alphanum",
"(",
"15",
",",
"extended",
"=",
"True",
",",
"isLast",
"=",
"True",
")",
"field_empty",
"=",
"pp",
".",
"Regex",
"(",
"'[ ]{15}'",
")",
"field_empty",
".",
"setParseAction",
"(",
"pp",
".",
"replaceWith",
"(",
"''",
")",
")",
"av_number",
"=",
"av_number",
"|",
"field_empty",
"av_number",
"=",
"av_number",
".",
"setName",
"(",
"'Audio-Visual Number'",
")",
".",
"setResultsName",
"(",
"'av_number'",
")",
"field",
"=",
"pp",
".",
"Group",
"(",
"society_code",
"+",
"pp",
".",
"Optional",
"(",
"av_number",
")",
")",
"field",
".",
"setParseAction",
"(",
"lambda",
"v",
":",
"_to_avi",
"(",
"v",
"[",
"0",
"]",
")",
")",
"field",
"=",
"field",
".",
"setName",
"(",
"name",
")",
"return",
"field",
".",
"setResultsName",
"(",
"'audio_visual_key'",
")"
]
| Creates the grammar for an Audio Visual Key code.
This is a variation on the ISAN (International Standard Audiovisual Number)
:param name: name for the field
:return: grammar for an ISRC field | [
"Creates",
"the",
"grammar",
"for",
"an",
"Audio",
"Visual",
"Key",
"code",
"."
]
| python | train | 29.032258 |
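A usage sketch for the audio_visual_key() grammar above, parsed with pyparsing in the usual way; the 18-character sample value (a 3-digit society code followed by a 15-character audio-visual number) is made up for illustration.

avi_field = audio_visual_key()
parsed = avi_field.parseString('001ABCDE12345FGHIJ')  # society code '001' + 15-character AV number
avi_value = parsed[0]                                  # object built by the _to_avi parse action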
bpsmith/tia | tia/analysis/model/pl.py | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pl.py#L426-L493 | def report_by_year(self, summary_fct=None, years=None, ltd=1, prior_n_yrs=None, first_n_yrs=None, ranges=None,
bm_rets=None):
"""Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_years: integer or list. Include summary for N years of return data prior to end date
:param first_n_years: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param dm_dly_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame
"""
if years and np.isscalar(years):
years = [years]
if summary_fct is None:
def summary_fct(pl):
monthly = pl.monthly_details
dly = pl.dly_details
data = OrderedDict()
data['mpl avg'] = monthly.mean
data['mpl std ann'] = monthly.std_ann
data['maxdd'] = dly.maxdd
data['maxdd dt'] = dly.maxdd_dt
data['avg dd'] = dly.dd_avg
data['best month'] = monthly.max
data['worst month'] = monthly.min
data['best day'] = dly.max
data['worst day'] = dly.min
data['nmonths'] = monthly.cnt
return data
results = OrderedDict()
if years is not False:
for yr, pandl in self.iter_by_year():
if years is None or yr in years:
results[yr] = summary_fct(pandl)
# First n years
if first_n_yrs:
first_n_yrs = first_n_yrs if not np.isscalar(first_n_yrs) else [first_n_yrs]
for first in first_n_yrs:
after = '12/31/%s' % (self.dly.index[0].year + first)
firstN = self.truncate(after=after)
results['first {0}yrs'.format(first)] = summary_fct(firstN)
# Ranges
if ranges:
for range in ranges:
yr_start, yr_end = range
rng_rets = self.truncate('1/1/%s' % yr_start, '12/31/%s' % yr_end)
results['{0}-{1}'.format(yr_start, yr_end)] = summary_fct(rng_rets)
# Prior n years
if prior_n_yrs:
prior_n_yrs = prior_n_yrs if not np.isscalar(prior_n_yrs) else [prior_n_yrs]
for prior in prior_n_yrs:
before = '1/1/%s' % (self.dly.index[-1].year - prior)
priorN = self.truncate(before)
results['past {0}yrs'.format(prior)] = summary_fct(priorN)
# LTD
if ltd:
results['ltd'] = summary_fct(self)
return pd.DataFrame(results, index=results.values()[0].keys()).T | [
"def",
"report_by_year",
"(",
"self",
",",
"summary_fct",
"=",
"None",
",",
"years",
"=",
"None",
",",
"ltd",
"=",
"1",
",",
"prior_n_yrs",
"=",
"None",
",",
"first_n_yrs",
"=",
"None",
",",
"ranges",
"=",
"None",
",",
"bm_rets",
"=",
"None",
")",
":",
"if",
"years",
"and",
"np",
".",
"isscalar",
"(",
"years",
")",
":",
"years",
"=",
"[",
"years",
"]",
"if",
"summary_fct",
"is",
"None",
":",
"def",
"summary_fct",
"(",
"pl",
")",
":",
"monthly",
"=",
"pl",
".",
"monthly_details",
"dly",
"=",
"pl",
".",
"dly_details",
"data",
"=",
"OrderedDict",
"(",
")",
"data",
"[",
"'mpl avg'",
"]",
"=",
"monthly",
".",
"mean",
"data",
"[",
"'mpl std ann'",
"]",
"=",
"monthly",
".",
"std_ann",
"data",
"[",
"'maxdd'",
"]",
"=",
"dly",
".",
"maxdd",
"data",
"[",
"'maxdd dt'",
"]",
"=",
"dly",
".",
"maxdd_dt",
"data",
"[",
"'avg dd'",
"]",
"=",
"dly",
".",
"dd_avg",
"data",
"[",
"'best month'",
"]",
"=",
"monthly",
".",
"max",
"data",
"[",
"'worst month'",
"]",
"=",
"monthly",
".",
"min",
"data",
"[",
"'best day'",
"]",
"=",
"dly",
".",
"max",
"data",
"[",
"'worst day'",
"]",
"=",
"dly",
".",
"min",
"data",
"[",
"'nmonths'",
"]",
"=",
"monthly",
".",
"cnt",
"return",
"data",
"results",
"=",
"OrderedDict",
"(",
")",
"if",
"years",
"is",
"not",
"False",
":",
"for",
"yr",
",",
"pandl",
"in",
"self",
".",
"iter_by_year",
"(",
")",
":",
"if",
"years",
"is",
"None",
"or",
"yr",
"in",
"years",
":",
"results",
"[",
"yr",
"]",
"=",
"summary_fct",
"(",
"pandl",
")",
"# First n years",
"if",
"first_n_yrs",
":",
"first_n_yrs",
"=",
"first_n_yrs",
"if",
"not",
"np",
".",
"isscalar",
"(",
"first_n_yrs",
")",
"else",
"[",
"first_n_yrs",
"]",
"for",
"first",
"in",
"first_n_yrs",
":",
"after",
"=",
"'12/31/%s'",
"%",
"(",
"self",
".",
"dly",
".",
"index",
"[",
"0",
"]",
".",
"year",
"+",
"first",
")",
"firstN",
"=",
"self",
".",
"truncate",
"(",
"after",
"=",
"after",
")",
"results",
"[",
"'first {0}yrs'",
".",
"format",
"(",
"first",
")",
"]",
"=",
"summary_fct",
"(",
"firstN",
")",
"# Ranges",
"if",
"ranges",
":",
"for",
"range",
"in",
"ranges",
":",
"yr_start",
",",
"yr_end",
"=",
"range",
"rng_rets",
"=",
"self",
".",
"truncate",
"(",
"'1/1/%s'",
"%",
"yr_start",
",",
"'12/31/%s'",
"%",
"yr_end",
")",
"results",
"[",
"'{0}-{1}'",
".",
"format",
"(",
"yr_start",
",",
"yr_end",
")",
"]",
"=",
"summary_fct",
"(",
"rng_rets",
")",
"# Prior n years",
"if",
"prior_n_yrs",
":",
"prior_n_yrs",
"=",
"prior_n_yrs",
"if",
"not",
"np",
".",
"isscalar",
"(",
"prior_n_yrs",
")",
"else",
"[",
"prior_n_yrs",
"]",
"for",
"prior",
"in",
"prior_n_yrs",
":",
"before",
"=",
"'1/1/%s'",
"%",
"(",
"self",
".",
"dly",
".",
"index",
"[",
"-",
"1",
"]",
".",
"year",
"-",
"prior",
")",
"priorN",
"=",
"self",
".",
"truncate",
"(",
"before",
")",
"results",
"[",
"'past {0}yrs'",
".",
"format",
"(",
"prior",
")",
"]",
"=",
"summary_fct",
"(",
"priorN",
")",
"# LTD",
"if",
"ltd",
":",
"results",
"[",
"'ltd'",
"]",
"=",
"summary_fct",
"(",
"self",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"results",
",",
"index",
"=",
"results",
".",
"values",
"(",
")",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
".",
"T"
]
| Summarize the profit and loss by year
:param summary_fct: function(ProfitAndLoss) and returns a dict or Series
:param years: int, array, boolean or None. If boolean and False, then show no years. If int or array
show only those years, else show all years if None
:param ltd: include live to date summary
:param prior_n_years: integer or list. Include summary for N years of return data prior to end date
:param first_n_years: integer or list. Include summary for N years of return data after start date
:param ranges: list of ranges. The range consists of a year start and year end
:param dm_dly_rets: daily return series for the benchmark for beta/alpha calcs
:return: DataFrame | [
"Summarize",
"the",
"profit",
"and",
"loss",
"by",
"year",
":",
"param",
"summary_fct",
":",
"function",
"(",
"ProfitAndLoss",
")",
"and",
"returns",
"a",
"dict",
"or",
"Series",
":",
"param",
"years",
":",
"int",
"array",
"boolean",
"or",
"None",
".",
"If",
"boolean",
"and",
"False",
"then",
"show",
"no",
"years",
".",
"If",
"int",
"or",
"array",
"show",
"only",
"those",
"years",
"else",
"show",
"all",
"years",
"if",
"None",
":",
"param",
"ltd",
":",
"include",
"live",
"to",
"date",
"summary",
":",
"param",
"prior_n_years",
":",
"integer",
"or",
"list",
".",
"Include",
"summary",
"for",
"N",
"years",
"of",
"return",
"data",
"prior",
"to",
"end",
"date",
":",
"param",
"first_n_years",
":",
"integer",
"or",
"list",
".",
"Include",
"summary",
"for",
"N",
"years",
"of",
"return",
"data",
"after",
"start",
"date",
":",
"param",
"ranges",
":",
"list",
"of",
"ranges",
".",
"The",
"range",
"consists",
"of",
"a",
"year",
"start",
"and",
"year",
"end",
":",
"param",
"dm_dly_rets",
":",
"daily",
"return",
"series",
"for",
"the",
"benchmark",
"for",
"beta",
"/",
"alpha",
"calcs",
":",
"return",
":",
"DataFrame"
]
| python | train | 44.235294 |
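A usage sketch for report_by_year(), assuming pl is an existing ProfitAndLoss object from this module (its construction is not shown in this record); the years and ranges are illustrative. Note that the method's final line indexes dict views (results.values()[0].keys()), which only works under Python 2.

summary_df = pl.report_by_year(years=[2012, 2013], prior_n_yrs=3, ranges=[(2010, 2012)])
print(summary_df[['mpl avg', 'maxdd', 'nmonths']])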
xperscore/alley | alley/migrations.py | https://github.com/xperscore/alley/blob/f9a5e9e2970230e38fd8a48b6a0bc1d43a38548e/alley/migrations.py#L188-L202 | def down(self, migration_id):
"""Rollback to migration."""
if not self.check_directory():
return
for migration in self.get_migrations_to_down(migration_id):
logger.info('Rollback migration %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if hasattr(migration_module, 'down'):
migration_module.down(self.db)
else:
logger.info('No down method on %s' % migration.filename)
self.collection.remove({'filename': migration.filename}) | [
"def",
"down",
"(",
"self",
",",
"migration_id",
")",
":",
"if",
"not",
"self",
".",
"check_directory",
"(",
")",
":",
"return",
"for",
"migration",
"in",
"self",
".",
"get_migrations_to_down",
"(",
"migration_id",
")",
":",
"logger",
".",
"info",
"(",
"'Rollback migration %s'",
"%",
"migration",
".",
"filename",
")",
"migration_module",
"=",
"self",
".",
"load_migration_file",
"(",
"migration",
".",
"filename",
")",
"if",
"hasattr",
"(",
"migration_module",
",",
"'down'",
")",
":",
"migration_module",
".",
"down",
"(",
"self",
".",
"db",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'No down method on %s'",
"%",
"migration",
".",
"filename",
")",
"self",
".",
"collection",
".",
"remove",
"(",
"{",
"'filename'",
":",
"migration",
".",
"filename",
"}",
")"
]
| Rollback to migration. | [
"Rollback",
"to",
"migration",
"."
]
| python | train | 38.933333 |
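A short usage sketch for the down() method, assuming migrations is an already-constructed Migrations object and that target_migration_id names a previously applied migration; both names are placeholders, since object construction and the id format are not part of this record.

migrations.down(target_migration_id)  # rolls back each applied migration down to the given id and removes its record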
ajslater/picopt | picopt/timestamp.py | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L16-L32 | def _get_timestamp(dirname_full, remove):
"""
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
"""
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print('Found timestamp {}:{}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | [
"def",
"_get_timestamp",
"(",
"dirname_full",
",",
"remove",
")",
":",
"record_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname_full",
",",
"RECORD_FILENAME",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"record_filename",
")",
":",
"return",
"None",
"mtime",
"=",
"os",
".",
"stat",
"(",
"record_filename",
")",
".",
"st_mtime",
"mtime_str",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"mtime",
")",
"print",
"(",
"'Found timestamp {}:{}'",
".",
"format",
"(",
"dirname_full",
",",
"mtime_str",
")",
")",
"if",
"Settings",
".",
"record_timestamp",
"and",
"remove",
":",
"OLD_TIMESTAMPS",
".",
"add",
"(",
"record_filename",
")",
"return",
"mtime"
]
| Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. | [
"Get",
"the",
"timestamp",
"from",
"the",
"timestamp",
"file",
"."
]
| python | train | 32.823529 |
idlesign/torrentool | torrentool/utils.py | https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/utils.py#L20-L34 | def humanize_filesize(bytes_size):
"""Returns human readable filesize.
:param int bytes_size:
:rtype: str
"""
if not bytes_size:
return '0 B'
names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
name_idx = int(math.floor(math.log(bytes_size, 1024)))
size = round(bytes_size / math.pow(1024, name_idx), 2)
return '%s %s' % (size, names[name_idx]) | [
"def",
"humanize_filesize",
"(",
"bytes_size",
")",
":",
"if",
"not",
"bytes_size",
":",
"return",
"'0 B'",
"names",
"=",
"(",
"'B'",
",",
"'KB'",
",",
"'MB'",
",",
"'GB'",
",",
"'TB'",
",",
"'PB'",
",",
"'EB'",
",",
"'ZB'",
",",
"'YB'",
")",
"name_idx",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"math",
".",
"log",
"(",
"bytes_size",
",",
"1024",
")",
")",
")",
"size",
"=",
"round",
"(",
"bytes_size",
"/",
"math",
".",
"pow",
"(",
"1024",
",",
"name_idx",
")",
",",
"2",
")",
"return",
"'%s %s'",
"%",
"(",
"size",
",",
"names",
"[",
"name_idx",
"]",
")"
]
| Returns human readable filesize.
:param int bytes_size:
:rtype: str | [
"Returns",
"human",
"readable",
"filesize",
"."
]
| python | train | 26.133333 |
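A few concrete input/output pairs for humanize_filesize(), following directly from the log-base-1024 bucketing and two-decimal rounding in the code above.

humanize_filesize(0)        # '0 B'
humanize_filesize(1536)     # '1.5 KB'
humanize_filesize(10**9)    # '953.67 MB'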
4degrees/riffle | source/riffle/browser.py | https://github.com/4degrees/riffle/blob/e5a0d908df8c93ff1ee7abdda8875fd1667df53d/source/riffle/browser.py#L146-L151 | def _onNavigate(self, index):
'''Handle selection of path segment.'''
if index > 0:
self.setLocation(
self._locationWidget.itemData(index), interactive=True
) | [
"def",
"_onNavigate",
"(",
"self",
",",
"index",
")",
":",
"if",
"index",
">",
"0",
":",
"self",
".",
"setLocation",
"(",
"self",
".",
"_locationWidget",
".",
"itemData",
"(",
"index",
")",
",",
"interactive",
"=",
"True",
")"
]
| Handle selection of path segment. | [
"Handle",
"selection",
"of",
"path",
"segment",
"."
]
| python | test | 34.833333 |
locationlabs/mockredis | mockredis/client.py | https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L1311-L1319 | def evalsha(self, sha, numkeys, *keys_and_args):
"""Emulates evalsha"""
if not self.script_exists(sha)[0]:
raise RedisError("Sha not registered")
script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args) | [
"def",
"evalsha",
"(",
"self",
",",
"sha",
",",
"numkeys",
",",
"*",
"keys_and_args",
")",
":",
"if",
"not",
"self",
".",
"script_exists",
"(",
"sha",
")",
"[",
"0",
"]",
":",
"raise",
"RedisError",
"(",
"\"Sha not registered\"",
")",
"script_callable",
"=",
"Script",
"(",
"self",
",",
"self",
".",
"shas",
"[",
"sha",
"]",
",",
"self",
".",
"load_lua_dependencies",
")",
"numkeys",
"=",
"max",
"(",
"numkeys",
",",
"0",
")",
"keys",
"=",
"keys_and_args",
"[",
":",
"numkeys",
"]",
"args",
"=",
"keys_and_args",
"[",
"numkeys",
":",
"]",
"return",
"script_callable",
"(",
"keys",
",",
"args",
")"
]
| Emulates evalsha | [
"Emulates",
"evalsha"
]
| python | train | 44.777778 |
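A usage sketch for evalsha(), assuming a MockRedis instance with Lua support available (the lua/lupa dependency) and assuming script_load mirrors the redis-py call that registers a script and returns its sha; both assumptions go beyond what this record shows.

from mockredis import MockRedis

r = MockRedis()
sha = r.script_load('return 1')  # register the script and get its sha (assumed redis-py-compatible API)
r.evalsha(sha, 0)                # expected to evaluate the cached script and return 1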
Riminder/python-riminder-api | riminder/webhook.py | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L102-L116 | def handle(self, request_headers={}, signature_header=None):
"""Handle request."""
if self.client.webhook_secret is None:
raise ValueError('Error: no webhook secret.')
encoded_header = self._get_signature_header(signature_header, request_headers)
decoded_request = self._decode_request(encoded_header)
if 'type' not in decoded_request:
raise ValueError("Error invalid request: no type field found.")
handler = self._getHandlerForEvent(decoded_request['type'])
if handler is None:
return
if (self._get_fct_number_of_arg(handler) == 1):
handler(decoded_request)
return
handler(decoded_request, decoded_request['type']) | [
"def",
"handle",
"(",
"self",
",",
"request_headers",
"=",
"{",
"}",
",",
"signature_header",
"=",
"None",
")",
":",
"if",
"self",
".",
"client",
".",
"webhook_secret",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Error: no webhook secret.'",
")",
"encoded_header",
"=",
"self",
".",
"_get_signature_header",
"(",
"signature_header",
",",
"request_headers",
")",
"decoded_request",
"=",
"self",
".",
"_decode_request",
"(",
"encoded_header",
")",
"if",
"'type'",
"not",
"in",
"decoded_request",
":",
"raise",
"ValueError",
"(",
"\"Error invalid request: no type field found.\"",
")",
"handler",
"=",
"self",
".",
"_getHandlerForEvent",
"(",
"decoded_request",
"[",
"'type'",
"]",
")",
"if",
"handler",
"is",
"None",
":",
"return",
"if",
"(",
"self",
".",
"_get_fct_number_of_arg",
"(",
"handler",
")",
"==",
"1",
")",
":",
"handler",
"(",
"decoded_request",
")",
"return",
"handler",
"(",
"decoded_request",
",",
"decoded_request",
"[",
"'type'",
"]",
")"
]
| Handle request. | [
"Handle",
"request",
"."
]
| python | train | 48.933333 |
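A usage sketch for handle(), assuming webhook is the Webhook helper bound to a client whose webhook_secret is set and that a handler was already registered for the incoming event type (the registration API is not part of this record); incoming_headers stands for the raw HTTP headers of the webhook request.

webhook.handle(request_headers=incoming_headers)  # decodes the signed payload and dispatches it to the matching handler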
gem/oq-engine | openquake/hazardlib/gsim/kale_2015.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/kale_2015.py#L129-L138 | def _compute_magnitude_scaling_term(self, C, mag):
"""
Compute and return magnitude scaling term in equation 2,
page 970.
"""
c1 = self.CONSTS['c1']
if mag <= c1:
return C['b1'] + C['b2'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2
else:
return C['b1'] + C['b7'] * (mag - c1) + C['b3'] * (8.5 - mag) ** 2 | [
"def",
"_compute_magnitude_scaling_term",
"(",
"self",
",",
"C",
",",
"mag",
")",
":",
"c1",
"=",
"self",
".",
"CONSTS",
"[",
"'c1'",
"]",
"if",
"mag",
"<=",
"c1",
":",
"return",
"C",
"[",
"'b1'",
"]",
"+",
"C",
"[",
"'b2'",
"]",
"*",
"(",
"mag",
"-",
"c1",
")",
"+",
"C",
"[",
"'b3'",
"]",
"*",
"(",
"8.5",
"-",
"mag",
")",
"**",
"2",
"else",
":",
"return",
"C",
"[",
"'b1'",
"]",
"+",
"C",
"[",
"'b7'",
"]",
"*",
"(",
"mag",
"-",
"c1",
")",
"+",
"C",
"[",
"'b3'",
"]",
"*",
"(",
"8.5",
"-",
"mag",
")",
"**",
"2"
]
| Compute and return magnitude scaling term in equation 2,
page 970. | [
"Compute",
"and",
"return",
"magnitude",
"scaling",
"term",
"in",
"equation",
"2",
"page",
"970",
"."
]
| python | train | 38.2 |
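Written out as an equation, the magnitude scaling term computed above is b1 + b2*(M - c1) + b3*(8.5 - M)**2 for M <= c1, and b1 + b7*(M - c1) + b3*(8.5 - M)**2 for M > c1, where the b-coefficients come from the coefficient row C and c1 from the class CONSTS.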
thespacedoctor/qubits | qubits/universe.py | https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/universe.py#L148-L238 | def random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory,
plot=False):
"""
*Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
## LOCAL APPLICATION ##
import numpy as np
import matplotlib.pyplot as plt
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
randomSNTypeList = []
# CREATE COUNTERS FOR PLOTTING
counters = {}
for k, v in relativeSNRates.iteritems():
counters[k] = 0
for i in range(sampleNumber):
randNum = np.random.rand()
cumulative = 0.
for k, v in relativeSNRates.iteritems():
cumulative = cumulative + v
if (randNum <= cumulative):
randType = k
counters[k] += 1
break
randomSNTypeList.append(randType)
# for k, v in relativeSNRates.iteritems():
# log.debug('%s = %s' % (k, counters[k]))
snTypeArray = np.array(randomSNTypeList)
if plot:
numTypes = len(relativeSNRates)
x = np.arange(1, numTypes + 1, 1)
heights = []
xticks = []
for k, v in relativeSNRates.iteritems():
xticks.append(k)
heights.append(counters[k])
fig = plt.figure(
num=None,
figsize=(8, 8),
dpi=None,
facecolor=None,
edgecolor=None,
frameon=True)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8])
ax.bar(
x,
heights,
width=0.8,
bottom=0)
plt.xticks(x + 0.5, xticks)
ax.set_xlabel('SN Type')
ax.set_ylabel('Number of SNe')
ax.grid(True)
title = "Weighted SN Distribution"
plt.title(title)
fileName = pathToOutputPlotDirectory + title.replace(" ", "_") + ".png"
plt.savefig(fileName)
plt.clf() # clear figure
return snTypeArray | [
"def",
"random_sn_types_array",
"(",
"log",
",",
"sampleNumber",
",",
"relativeSNRates",
",",
"pathToOutputPlotDirectory",
",",
"plot",
"=",
"False",
")",
":",
"################ > IMPORTS ################",
"## STANDARD LIB ##",
"## THIRD PARTY ##",
"## LOCAL APPLICATION ##",
"import",
"numpy",
"as",
"np",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"################ > VARIABLE SETTINGS ######",
"################ >ACTION(S) ################",
"randomSNTypeList",
"=",
"[",
"]",
"# CREATE COUNTERS FOR PLOTTING",
"counters",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"relativeSNRates",
".",
"iteritems",
"(",
")",
":",
"counters",
"[",
"k",
"]",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"sampleNumber",
")",
":",
"randNum",
"=",
"np",
".",
"random",
".",
"rand",
"(",
")",
"cumulative",
"=",
"0.",
"for",
"k",
",",
"v",
"in",
"relativeSNRates",
".",
"iteritems",
"(",
")",
":",
"cumulative",
"=",
"cumulative",
"+",
"v",
"if",
"(",
"randNum",
"<=",
"cumulative",
")",
":",
"randType",
"=",
"k",
"counters",
"[",
"k",
"]",
"+=",
"1",
"break",
"randomSNTypeList",
".",
"append",
"(",
"randType",
")",
"# for k, v in relativeSNRates.iteritems():",
"# log.debug('%s = %s' % (k, counters[k]))",
"snTypeArray",
"=",
"np",
".",
"array",
"(",
"randomSNTypeList",
")",
"if",
"plot",
":",
"numTypes",
"=",
"len",
"(",
"relativeSNRates",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"numTypes",
"+",
"1",
",",
"1",
")",
"heights",
"=",
"[",
"]",
"xticks",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"relativeSNRates",
".",
"iteritems",
"(",
")",
":",
"xticks",
".",
"append",
"(",
"k",
")",
"heights",
".",
"append",
"(",
"counters",
"[",
"k",
"]",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"num",
"=",
"None",
",",
"figsize",
"=",
"(",
"8",
",",
"8",
")",
",",
"dpi",
"=",
"None",
",",
"facecolor",
"=",
"None",
",",
"edgecolor",
"=",
"None",
",",
"frameon",
"=",
"True",
")",
"ax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
"0.1",
",",
"0.1",
",",
"0.8",
",",
"0.8",
"]",
")",
"ax",
".",
"bar",
"(",
"x",
",",
"heights",
",",
"width",
"=",
"0.8",
",",
"bottom",
"=",
"0",
")",
"plt",
".",
"xticks",
"(",
"x",
"+",
"0.5",
",",
"xticks",
")",
"ax",
".",
"set_xlabel",
"(",
"'SN Type'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Number of SNe'",
")",
"ax",
".",
"grid",
"(",
"True",
")",
"title",
"=",
"\"Weighted SN Distribution\"",
"plt",
".",
"title",
"(",
"title",
")",
"fileName",
"=",
"pathToOutputPlotDirectory",
"+",
"title",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"+",
"\".png\"",
"plt",
".",
"savefig",
"(",
"fileName",
")",
"plt",
".",
"clf",
"(",
")",
"# clear figure",
"return",
"snTypeArray"
]
| *Generate random supernova types from the weighted distributions set in the simulation settings file*
**Key Arguments:**
- ``log`` -- logger
- ``sampleNumber`` -- the sample number, i.e. array size
- ``relativeSNRates`` -- dictionary of the rates
- ``pathToOutputPlotDirectory`` -- path to the output directory (provided by the user)
- ``plot`` -- generate plot?
**Return:**
- ``snTypesArray`` -- numpy array of the random SN types | [
"*",
"Generate",
"random",
"supernova",
"types",
"from",
"the",
"weighted",
"distributions",
"set",
"in",
"the",
"simulation",
"settings",
"file",
"*"
]
| python | train | 26.912088 |
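A usage sketch for random_sn_types_array(), assuming log is a standard logger and using an illustrative rate dictionary; the type labels and weights below are made up and only need to sum to 1.0 for the cumulative draw in the code above to behave as intended.

import logging
log = logging.getLogger(__name__)
relativeSNRates = {'Ia': 0.5, 'II': 0.35, 'Ibc': 0.15}
sn_types = random_sn_types_array(log, 1000, relativeSNRates, './plots/', plot=False)  # numpy array of 1000 type labels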
eddieantonio/perfection | perfection/czech.py | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L122-L148 | def generate_or_fail(self):
"""
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
"""
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
# Try to generate that graph, mack!
# Note that failure to generate the graph here should be caught
# by the caller.
graph = forest.ForestGraph(edges=edges)
# Associate each edge with its corresponding word.
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
# Assign all of these to the object.
for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
self.__dict__[name] = locals()[name] | [
"def",
"generate_or_fail",
"(",
"self",
")",
":",
"t1",
"=",
"self",
".",
"generate_random_table",
"(",
")",
"t2",
"=",
"self",
".",
"generate_random_table",
"(",
")",
"f1",
"=",
"self",
".",
"generate_func",
"(",
"t1",
")",
"f2",
"=",
"self",
".",
"generate_func",
"(",
"t2",
")",
"edges",
"=",
"[",
"(",
"f1",
"(",
"word",
")",
",",
"f2",
"(",
"word",
")",
")",
"for",
"word",
"in",
"self",
".",
"words",
"]",
"# Try to generate that graph, mack!",
"# Note that failure to generate the graph here should be caught",
"# by the caller.",
"graph",
"=",
"forest",
".",
"ForestGraph",
"(",
"edges",
"=",
"edges",
")",
"# Associate each edge with its corresponding word.",
"associations",
"=",
"{",
"}",
"for",
"num",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"words",
")",
")",
":",
"edge",
"=",
"edges",
"[",
"num",
"]",
"word",
"=",
"self",
".",
"words",
"[",
"num",
"]",
"associations",
"[",
"graph",
".",
"canonical_order",
"(",
"edge",
")",
"]",
"=",
"(",
"num",
",",
"word",
")",
"# Assign all of these to the object.",
"for",
"name",
"in",
"(",
"'t1'",
",",
"'t2'",
",",
"'f1'",
",",
"'f2'",
",",
"'graph'",
",",
"'associations'",
")",
":",
"self",
".",
"__dict__",
"[",
"name",
"]",
"=",
"locals",
"(",
")",
"[",
"name",
"]"
]
| Attempts to generate a random acyclic graph, raising an
InvariantError if unable to. | [
"Attempts",
"to",
"generate",
"a",
"random",
"acyclic",
"graph",
"raising",
"an",
"InvariantError",
"if",
"unable",
"to",
"."
]
| python | train | 35.666667 |
daviddrysdale/python-phonenumbers | python/phonenumbers/unicode_util.py | https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/unicode_util.py#L378-L394 | def get(cls, uni_char):
"""Return the Unicode block of the given Unicode character"""
uni_char = unicod(uni_char) # Force to Unicode
code_point = ord(uni_char)
if Block._RANGE_KEYS is None:
Block._RANGE_KEYS = sorted(Block._RANGES.keys())
idx = bisect.bisect_left(Block._RANGE_KEYS, code_point)
if (idx > 0 and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx - 1]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx - 1]].end):
return Block._RANGES[Block._RANGE_KEYS[idx - 1]]
elif (idx < len(Block._RANGES) and
code_point >= Block._RANGES[Block._RANGE_KEYS[idx]].start and
code_point <= Block._RANGES[Block._RANGE_KEYS[idx]].end):
return Block._RANGES[Block._RANGE_KEYS[idx]]
else:
return Block.UNKNOWN | [
"def",
"get",
"(",
"cls",
",",
"uni_char",
")",
":",
"uni_char",
"=",
"unicod",
"(",
"uni_char",
")",
"# Force to Unicode",
"code_point",
"=",
"ord",
"(",
"uni_char",
")",
"if",
"Block",
".",
"_RANGE_KEYS",
"is",
"None",
":",
"Block",
".",
"_RANGE_KEYS",
"=",
"sorted",
"(",
"Block",
".",
"_RANGES",
".",
"keys",
"(",
")",
")",
"idx",
"=",
"bisect",
".",
"bisect_left",
"(",
"Block",
".",
"_RANGE_KEYS",
",",
"code_point",
")",
"if",
"(",
"idx",
">",
"0",
"and",
"code_point",
">=",
"Block",
".",
"_RANGES",
"[",
"Block",
".",
"_RANGE_KEYS",
"[",
"idx",
"-",
"1",
"]",
"]",
".",
"start",
"and",
"code_point",
"<=",
"Block",
".",
"_RANGES",
"[",
"Block",
".",
"_RANGE_KEYS",
"[",
"idx",
"-",
"1",
"]",
"]",
".",
"end",
")",
":",
"return",
"Block",
".",
"_RANGES",
"[",
"Block",
".",
"_RANGE_KEYS",
"[",
"idx",
"-",
"1",
"]",
"]",
"elif",
"(",
"idx",
"<",
"len",
"(",
"Block",
".",
"_RANGES",
")",
"and",
"code_point",
">=",
"Block",
".",
"_RANGES",
"[",
"Block",
".",
"_RANGE_KEYS",
"[",
"idx",
"]",
"]",
".",
"start",
"and",
"code_point",
"<=",
"Block",
".",
"_RANGES",
"[",
"Block",
".",
"_RANGE_KEYS",
"[",
"idx",
"]",
"]",
".",
"end",
")",
":",
"return",
"Block",
".",
"_RANGES",
"[",
"Block",
".",
"_RANGE_KEYS",
"[",
"idx",
"]",
"]",
"else",
":",
"return",
"Block",
".",
"UNKNOWN"
]
| Return the Unicode block of the given Unicode character | [
"Return",
"the",
"Unicode",
"block",
"of",
"the",
"given",
"Unicode",
"character"
]
| python | train | 50.764706 |
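A usage sketch for Block.get(); the specific block objects returned depend on Block._RANGES, which is not shown in this record, so the comments only indicate the expected block rather than an exact attribute value.

Block.get(u'a')        # block covering U+0061, i.e. the Basic Latin range
Block.get(u'\u4e2d')   # block covering U+4E2D, a CJK Unified Ideographs code point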
GNS3/gns3-server | gns3server/utils/interfaces.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/interfaces.py#L138-L165 | def is_interface_up(interface):
"""
Checks if an interface is up.
:param interface: interface name
:returns: boolean
"""
if sys.platform.startswith("linux"):
if interface not in psutil.net_if_addrs():
return False
import fcntl
SIOCGIFFLAGS = 0x8913
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
result = fcntl.ioctl(s.fileno(), SIOCGIFFLAGS, interface + '\0' * 256)
flags, = struct.unpack('H', result[16:18])
if flags & 1: # check if the up bit is set
return True
return False
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Exception when checking if {} is up: {}".format(interface, e))
else:
# TODO: Windows & OSX support
return True | [
"def",
"is_interface_up",
"(",
"interface",
")",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"linux\"",
")",
":",
"if",
"interface",
"not",
"in",
"psutil",
".",
"net_if_addrs",
"(",
")",
":",
"return",
"False",
"import",
"fcntl",
"SIOCGIFFLAGS",
"=",
"0x8913",
"try",
":",
"with",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"as",
"s",
":",
"result",
"=",
"fcntl",
".",
"ioctl",
"(",
"s",
".",
"fileno",
"(",
")",
",",
"SIOCGIFFLAGS",
",",
"interface",
"+",
"'\\0'",
"*",
"256",
")",
"flags",
",",
"=",
"struct",
".",
"unpack",
"(",
"'H'",
",",
"result",
"[",
"16",
":",
"18",
"]",
")",
"if",
"flags",
"&",
"1",
":",
"# check if the up bit is set",
"return",
"True",
"return",
"False",
"except",
"OSError",
"as",
"e",
":",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPInternalServerError",
"(",
"text",
"=",
"\"Exception when checking if {} is up: {}\"",
".",
"format",
"(",
"interface",
",",
"e",
")",
")",
"else",
":",
"# TODO: Windows & OSX support",
"return",
"True"
]
| Checks if an interface is up.
:param interface: interface name
:returns: boolean | [
"Checks",
"if",
"an",
"interface",
"is",
"up",
"."
]
| python | train | 30.5 |
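A usage sketch for is_interface_up(); the interface name is illustrative. Per the code above, the IFF_UP ioctl check only happens on Linux, while other platforms currently always report True.

if is_interface_up('eth0'):
    print('eth0 is up')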
omza/azurestoragewrap | azurestoragewrap/blob.py | https://github.com/omza/azurestoragewrap/blob/976878e95d82ff0f7d8a00a5e4a7a3fb6268ab08/azurestoragewrap/blob.py#L476-L495 | def delete(self, storagemodel:object, modeldefinition = None) -> bool:
""" delete the blob from storage """
deleted = False
blobservice = modeldefinition['blobservice']
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if blobservice.exists(container_name, blob_name):
""" delete """
blob = blobservice.delete_blob(container_name, blob_name)
deleted = True
except Exception as e:
msg = 'can not delete blob {} from storage because {!s}'.format(blob_name, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return deleted | [
"def",
"delete",
"(",
"self",
",",
"storagemodel",
":",
"object",
",",
"modeldefinition",
"=",
"None",
")",
"->",
"bool",
":",
"deleted",
"=",
"False",
"blobservice",
"=",
"modeldefinition",
"[",
"'blobservice'",
"]",
"container_name",
"=",
"modeldefinition",
"[",
"'container'",
"]",
"blob_name",
"=",
"storagemodel",
".",
"name",
"try",
":",
"if",
"blobservice",
".",
"exists",
"(",
"container_name",
",",
"blob_name",
")",
":",
"\"\"\" delete \"\"\"",
"blob",
"=",
"blobservice",
".",
"delete_blob",
"(",
"container_name",
",",
"blob_name",
")",
"deleted",
"=",
"True",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'can not delete blob {} from storage because {!s}'",
".",
"format",
"(",
"blob_name",
",",
"e",
")",
"raise",
"AzureStorageWrapException",
"(",
"storagemodel",
",",
"msg",
"=",
"msg",
")",
"return",
"deleted"
]
| delete the blob from storage | [
"delete",
"the",
"blob",
"from",
"storage"
]
| python | train | 35.45 |
marcomusy/vtkplotter | vtkplotter/actors.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L256-L264 | def x(self, position=None):
"""Set/Get actor position along x axis."""
p = self.GetPosition()
if position is None:
return p[0]
self.SetPosition(position, p[1], p[2])
if self.trail:
self.updateTrail()
return self | [
"def",
"x",
"(",
"self",
",",
"position",
"=",
"None",
")",
":",
"p",
"=",
"self",
".",
"GetPosition",
"(",
")",
"if",
"position",
"is",
"None",
":",
"return",
"p",
"[",
"0",
"]",
"self",
".",
"SetPosition",
"(",
"position",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"2",
"]",
")",
"if",
"self",
".",
"trail",
":",
"self",
".",
"updateTrail",
"(",
")",
"return",
"self"
]
| Set/Get actor position along x axis. | [
"Set",
"/",
"Get",
"actor",
"position",
"along",
"x",
"axis",
"."
]
| python | train | 30.555556 |
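A usage sketch for the x() accessor, assuming actor is an existing vtkplotter Actor; note that the setter returns the actor itself, so calls can be chained.

actor.x(2.5)           # move the actor to x = 2.5 and return it
current_x = actor.x()  # read back the x position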
happyleavesaoc/aoc-mgz | mgz/recorded_game/__init__.py | https://github.com/happyleavesaoc/aoc-mgz/blob/13fc379cc062d7640bfa028eed9c0d45d37a7b2b/mgz/recorded_game/__init__.py#L359-L362 | def players(self, postgame, game_type):
"""Return parsed players."""
for i, attributes in self._players():
yield self._parse_player(i, attributes, postgame, game_type) | [
"def",
"players",
"(",
"self",
",",
"postgame",
",",
"game_type",
")",
":",
"for",
"i",
",",
"attributes",
"in",
"self",
".",
"_players",
"(",
")",
":",
"yield",
"self",
".",
"_parse_player",
"(",
"i",
",",
"attributes",
",",
"postgame",
",",
"game_type",
")"
]
| Return parsed players. | [
"Return",
"parsed",
"players",
"."
]
| python | train | 48 |
summa-tx/riemann | riemann/tx/tx.py | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L668-L694 | def _hash_outputs(self, index, sighash_type):
'''BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
'''
if sighash_type == shared.SIGHASH_ALL:
# If the sighash type is ALL,
# hashOutputs is the double SHA256 of all output amounts
# paired up with their scriptPubKey;
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
# if sighash type is SINGLE
# and the input index is smaller than the number of outputs,
# hashOutputs is the double SHA256 of the output at the same index
return utils.hash256(self.tx_outs[index].to_bytes())
else:
# Otherwise, hashOutputs is a uint256 of 0x0000......0000
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.') | [
"def",
"_hash_outputs",
"(",
"self",
",",
"index",
",",
"sighash_type",
")",
":",
"if",
"sighash_type",
"==",
"shared",
".",
"SIGHASH_ALL",
":",
"# If the sighash type is ALL,",
"# hashOutputs is the double SHA256 of all output amounts",
"# paired up with their scriptPubKey;",
"outputs",
"=",
"ByteData",
"(",
")",
"for",
"tx_out",
"in",
"self",
".",
"tx_outs",
":",
"outputs",
"+=",
"tx_out",
".",
"to_bytes",
"(",
")",
"return",
"utils",
".",
"hash256",
"(",
"outputs",
".",
"to_bytes",
"(",
")",
")",
"elif",
"(",
"sighash_type",
"==",
"shared",
".",
"SIGHASH_SINGLE",
"and",
"index",
"<",
"len",
"(",
"self",
".",
"tx_outs",
")",
")",
":",
"# if sighash type is SINGLE",
"# and the input index is smaller than the number of outputs,",
"# hashOutputs is the double SHA256 of the output at the same index",
"return",
"utils",
".",
"hash256",
"(",
"self",
".",
"tx_outs",
"[",
"index",
"]",
".",
"to_bytes",
"(",
")",
")",
"else",
":",
"# Otherwise, hashOutputs is a uint256 of 0x0000......0000",
"raise",
"NotImplementedError",
"(",
"'I refuse to implement the SIGHASH_SINGLE bug.'",
")"
]
| BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash | [
"BIP143",
"hashOutputs",
"implementation"
]
| python | train | 44.592593 |
adrn/gala | gala/potential/frame/builtin/transformations.py | https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/potential/frame/builtin/transformations.py#L122-L142 | def constantrotating_to_static(frame_r, frame_i, w, t=None):
"""
Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame.
"""
return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i,
w=w, t=t, sign=-1.) | [
"def",
"constantrotating_to_static",
"(",
"frame_r",
",",
"frame_i",
",",
"w",
",",
"t",
"=",
"None",
")",
":",
"return",
"_constantrotating_static_helper",
"(",
"frame_r",
"=",
"frame_r",
",",
"frame_i",
"=",
"frame_i",
",",
"w",
"=",
"w",
",",
"t",
"=",
"t",
",",
"sign",
"=",
"-",
"1.",
")"
]
| Transform from a constantly rotating frame to a static, inertial frame.
Parameters
----------
frame_i : `~gala.potential.StaticFrame`
frame_r : `~gala.potential.ConstantRotatingFrame`
w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`
t : quantity_like (optional)
Required if input coordinates are just a phase-space position.
Returns
-------
pos : `~astropy.units.Quantity`
Position in static, inertial frame.
vel : `~astropy.units.Quantity`
Velocity in static, inertial frame. | [
"Transform",
"from",
"a",
"constantly",
"rotating",
"frame",
"to",
"a",
"static",
"inertial",
"frame",
"."
]
| python | train | 36.095238 |
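A usage sketch for constantrotating_to_static(), assuming gala's frame and phase-space classes are constructed as below; the pattern speed, position and velocity values are illustrative, and the constructor signatures are assumptions based on gala's documented API rather than on this record.

import astropy.units as u
import gala.potential as gp
import gala.dynamics as gd
from gala.units import galactic

frame_r = gp.ConstantRotatingFrame(Omega=[0., 0., 40.] * u.km/u.s/u.kpc, units=galactic)
frame_i = gp.StaticFrame(units=galactic)
w = gd.PhaseSpacePosition(pos=[8., 0., 0.] * u.kpc, vel=[0., 220., 0.] * u.km/u.s)
pos_i, vel_i = constantrotating_to_static(frame_r, frame_i, w, t=0. * u.Myr)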
thebigmunch/gmusicapi-wrapper | gmusicapi_wrapper/mobileclient.py | https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/mobileclient.py#L88-L123 | def get_google_songs(self, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria.
"""
logger.info("Loading Google Music songs...")
google_songs = self.api.get_all_songs()
matched_songs, filtered_songs = filter_google_songs(
google_songs, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes
)
logger.info("Filtered {0} Google Music songs".format(len(filtered_songs)))
logger.info("Loaded {0} Google Music songs".format(len(matched_songs)))
return matched_songs, filtered_songs | [
"def",
"get_google_songs",
"(",
"self",
",",
"include_filters",
"=",
"None",
",",
"exclude_filters",
"=",
"None",
",",
"all_includes",
"=",
"False",
",",
"all_excludes",
"=",
"False",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading Google Music songs...\"",
")",
"google_songs",
"=",
"self",
".",
"api",
".",
"get_all_songs",
"(",
")",
"matched_songs",
",",
"filtered_songs",
"=",
"filter_google_songs",
"(",
"google_songs",
",",
"include_filters",
"=",
"include_filters",
",",
"exclude_filters",
"=",
"exclude_filters",
",",
"all_includes",
"=",
"all_includes",
",",
"all_excludes",
"=",
"all_excludes",
")",
"logger",
".",
"info",
"(",
"\"Filtered {0} Google Music songs\"",
".",
"format",
"(",
"len",
"(",
"filtered_songs",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"Loaded {0} Google Music songs\"",
".",
"format",
"(",
"len",
"(",
"matched_songs",
")",
")",
")",
"return",
"matched_songs",
",",
"filtered_songs"
]
| Create song list from user's Google Music library.
Parameters:
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Mobileclient client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts matching criteria and
a list of Google Music song dicts filtered out using filter criteria. | [
"Create",
"song",
"list",
"from",
"user",
"s",
"Google",
"Music",
"library",
"."
]
| python | valid | 44.166667 |
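A usage sketch for get_google_songs(), assuming wrapper is an already-authenticated Mobileclient wrapper instance from this module; the metadata field names and regex patterns in the filters are illustrative.

matched, filtered = wrapper.get_google_songs(
    include_filters=[('artist', r'(?i)radiohead')],
    exclude_filters=[('album', r'(?i)\blive\b')],
    all_includes=False, all_excludes=False)
print(len(matched), 'matched;', len(filtered), 'filtered out')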