repo (stringlengths 7-55) | path (stringlengths 4-223) | url (stringlengths 87-315) | code (stringlengths 75-104k) | code_tokens (list) | docstring (stringlengths 1-46.9k) | docstring_tokens (list) | language (stringclasses 1 value) | partition (stringclasses 3 values) | avg_line_len (float64 7.91-980) |
---|---|---|---|---|---|---|---|---|---|
pandas-dev/pandas | pandas/core/generic.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3817-L3868 | def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result | [
"def",
"_drop_axis",
"(",
"self",
",",
"labels",
",",
"axis",
",",
"level",
"=",
"None",
",",
"errors",
"=",
"'raise'",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"axis_name",
"=",
"self",
".",
"_get_axis_name",
"(",
"axis",
")",
"axis",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"if",
"axis",
".",
"is_unique",
":",
"if",
"level",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"MultiIndex",
")",
":",
"raise",
"AssertionError",
"(",
"'axis must be a MultiIndex'",
")",
"new_axis",
"=",
"axis",
".",
"drop",
"(",
"labels",
",",
"level",
"=",
"level",
",",
"errors",
"=",
"errors",
")",
"else",
":",
"new_axis",
"=",
"axis",
".",
"drop",
"(",
"labels",
",",
"errors",
"=",
"errors",
")",
"result",
"=",
"self",
".",
"reindex",
"(",
"*",
"*",
"{",
"axis_name",
":",
"new_axis",
"}",
")",
"# Case for non-unique axis",
"else",
":",
"labels",
"=",
"ensure_object",
"(",
"com",
".",
"index_labels_to_array",
"(",
"labels",
")",
")",
"if",
"level",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"MultiIndex",
")",
":",
"raise",
"AssertionError",
"(",
"'axis must be a MultiIndex'",
")",
"indexer",
"=",
"~",
"axis",
".",
"get_level_values",
"(",
"level",
")",
".",
"isin",
"(",
"labels",
")",
"# GH 18561 MultiIndex.drop should raise if label is absent",
"if",
"errors",
"==",
"'raise'",
"and",
"indexer",
".",
"all",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'{} not found in axis'",
".",
"format",
"(",
"labels",
")",
")",
"else",
":",
"indexer",
"=",
"~",
"axis",
".",
"isin",
"(",
"labels",
")",
"# Check if label doesn't exist along axis",
"labels_missing",
"=",
"(",
"axis",
".",
"get_indexer_for",
"(",
"labels",
")",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
"if",
"errors",
"==",
"'raise'",
"and",
"labels_missing",
":",
"raise",
"KeyError",
"(",
"'{} not found in axis'",
".",
"format",
"(",
"labels",
")",
")",
"slicer",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"self",
".",
"ndim",
"slicer",
"[",
"self",
".",
"_get_axis_number",
"(",
"axis_name",
")",
"]",
"=",
"indexer",
"result",
"=",
"self",
".",
"loc",
"[",
"tuple",
"(",
"slicer",
")",
"]",
"return",
"result"
]
| Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped. | [
"Drop",
"labels",
"from",
"specified",
"axis",
".",
"Used",
"in",
"the",
"drop",
"method",
"internally",
"."
]
| python | train | 38.884615 |
kensho-technologies/graphql-compiler | setup.py | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/setup.py#L13-L20 | def read_file(filename):
"""Read package file as text to get name and version"""
# intentionally *not* adding an encoding option to open
# see here:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'graphql_compiler', filename), 'r') as f:
return f.read() | [
"def",
"read_file",
"(",
"filename",
")",
":",
"# intentionally *not* adding an encoding option to open",
"# see here:",
"# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690",
"here",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"with",
"codecs",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"here",
",",
"'graphql_compiler'",
",",
"filename",
")",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
]
| Read package file as text to get name and version | [
"Read",
"package",
"file",
"as",
"text",
"to",
"get",
"name",
"and",
"version"
]
| python | train | 48.25 |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/input_readers.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1368-L1395 | def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key) | [
"def",
"validate",
"(",
"cls",
",",
"mapper_spec",
")",
":",
"if",
"mapper_spec",
".",
"input_reader_class",
"(",
")",
"!=",
"cls",
":",
"raise",
"BadReaderParamsError",
"(",
"\"Mapper input reader class mismatch\"",
")",
"params",
"=",
"_get_params",
"(",
"mapper_spec",
")",
"if",
"cls",
".",
"BLOB_KEYS_PARAM",
"not",
"in",
"params",
":",
"raise",
"BadReaderParamsError",
"(",
"\"Must specify 'blob_keys' for mapper input\"",
")",
"blob_keys",
"=",
"params",
"[",
"cls",
".",
"BLOB_KEYS_PARAM",
"]",
"if",
"isinstance",
"(",
"blob_keys",
",",
"basestring",
")",
":",
"# This is a mechanism to allow multiple blob keys (which do not contain",
"# commas) in a single string. It may go away.",
"blob_keys",
"=",
"blob_keys",
".",
"split",
"(",
"\",\"",
")",
"if",
"len",
"(",
"blob_keys",
")",
">",
"cls",
".",
"_MAX_BLOB_KEYS_COUNT",
":",
"raise",
"BadReaderParamsError",
"(",
"\"Too many 'blob_keys' for mapper input\"",
")",
"if",
"not",
"blob_keys",
":",
"raise",
"BadReaderParamsError",
"(",
"\"No 'blob_keys' specified for mapper input\"",
")",
"for",
"blob_key",
"in",
"blob_keys",
":",
"blob_info",
"=",
"blobstore",
".",
"BlobInfo",
".",
"get",
"(",
"blobstore",
".",
"BlobKey",
"(",
"blob_key",
")",
")",
"if",
"not",
"blob_info",
":",
"raise",
"BadReaderParamsError",
"(",
"\"Could not find blobinfo for key %s\"",
"%",
"blob_key",
")"
]
| Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid. | [
"Validates",
"mapper",
"spec",
"and",
"all",
"mapper",
"parameters",
"."
]
| python | train | 43.571429 |
nutechsoftware/alarmdecoder | alarmdecoder/zonetracking.py | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/zonetracking.py#L314-L328 | def _add_zone(self, zone, name='', status=Zone.CLEAR, expander=False):
"""
Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int
"""
if not zone in self._zones:
self._zones[zone] = Zone(zone=zone, name=name, status=None, expander=expander)
self._update_zone(zone, status=status) | [
"def",
"_add_zone",
"(",
"self",
",",
"zone",
",",
"name",
"=",
"''",
",",
"status",
"=",
"Zone",
".",
"CLEAR",
",",
"expander",
"=",
"False",
")",
":",
"if",
"not",
"zone",
"in",
"self",
".",
"_zones",
":",
"self",
".",
"_zones",
"[",
"zone",
"]",
"=",
"Zone",
"(",
"zone",
"=",
"zone",
",",
"name",
"=",
"name",
",",
"status",
"=",
"None",
",",
"expander",
"=",
"expander",
")",
"self",
".",
"_update_zone",
"(",
"zone",
",",
"status",
"=",
"status",
")"
]
| Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int | [
"Adds",
"a",
"zone",
"to",
"the",
"internal",
"zone",
"list",
"."
]
| python | train | 32.933333 |
openvax/varlens | varlens/read_evidence/pileup_collection.py | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L365-L402 | def group_by_match(self, variant):
'''
Given a variant, split the PileupCollection based on whether it the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them.
'''
locus = to_locus(variant)
if len(variant.ref) != len(locus.positions):
logging.warning(
"Ref is length %d but locus has %d bases in variant: %s" %
(len(variant.ref), len(locus.positions), str(variant)))
alleles_dict = self.group_by_allele(locus)
single_base_loci = [
Locus.from_interbase_coordinates(locus.contig, position)
for position in locus.positions
]
empty_pileups = dict(
(locus, Pileup(locus=locus, elements=[]))
for locus in single_base_loci)
empty_collection = PileupCollection(pileups=empty_pileups, parent=self)
ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
other = alleles_dict
# TODO: consider end of read issues for insertions
return MatchingEvidence(ref, alt, other) | [
"def",
"group_by_match",
"(",
"self",
",",
"variant",
")",
":",
"locus",
"=",
"to_locus",
"(",
"variant",
")",
"if",
"len",
"(",
"variant",
".",
"ref",
")",
"!=",
"len",
"(",
"locus",
".",
"positions",
")",
":",
"logging",
".",
"warning",
"(",
"\"Ref is length %d but locus has %d bases in variant: %s\"",
"%",
"(",
"len",
"(",
"variant",
".",
"ref",
")",
",",
"len",
"(",
"locus",
".",
"positions",
")",
",",
"str",
"(",
"variant",
")",
")",
")",
"alleles_dict",
"=",
"self",
".",
"group_by_allele",
"(",
"locus",
")",
"single_base_loci",
"=",
"[",
"Locus",
".",
"from_interbase_coordinates",
"(",
"locus",
".",
"contig",
",",
"position",
")",
"for",
"position",
"in",
"locus",
".",
"positions",
"]",
"empty_pileups",
"=",
"dict",
"(",
"(",
"locus",
",",
"Pileup",
"(",
"locus",
"=",
"locus",
",",
"elements",
"=",
"[",
"]",
")",
")",
"for",
"locus",
"in",
"single_base_loci",
")",
"empty_collection",
"=",
"PileupCollection",
"(",
"pileups",
"=",
"empty_pileups",
",",
"parent",
"=",
"self",
")",
"ref",
"=",
"{",
"variant",
".",
"ref",
":",
"alleles_dict",
".",
"pop",
"(",
"variant",
".",
"ref",
",",
"empty_collection",
")",
"}",
"alt",
"=",
"{",
"variant",
".",
"alt",
":",
"alleles_dict",
".",
"pop",
"(",
"variant",
".",
"alt",
",",
"empty_collection",
")",
"}",
"other",
"=",
"alleles_dict",
"# TODO: consider end of read issues for insertions",
"return",
"MatchingEvidence",
"(",
"ref",
",",
"alt",
",",
"other",
")"
]
| Given a variant, split the PileupCollection based on whether it the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them. | [
"Given",
"a",
"variant",
"split",
"the",
"PileupCollection",
"based",
"on",
"whether",
"it",
"the",
"data",
"supports",
"the",
"reference",
"allele",
"the",
"alternate",
"allele",
"or",
"neither",
"."
]
| python | train | 39.947368 |
thespacedoctor/astrocalc | astrocalc/coords/unit_conversion.py | https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/coords/unit_conversion.py#L363-L448 | def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal | [
"def",
"dec_decimal_to_sexegesimal",
"(",
"self",
",",
"dec",
",",
"delimiter",
"=",
"\":\"",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``dec_decimal_to_sexegesimal`` method'",
")",
"import",
"math",
"# CONVERT DEC TO FLOAT",
"try",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"attempting to convert RA to float\"",
")",
"dec",
"=",
"float",
"(",
"dec",
")",
"except",
"Exception",
",",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"could not convert RA to float - failed with this error: %s \"",
"%",
"(",
"str",
"(",
"e",
")",
",",
")",
")",
"return",
"-",
"1",
"# COMPLAIN IF DEC NOT BETWEEN -90 - 90",
"if",
"dec",
">",
"-",
"90.",
"and",
"dec",
"<",
"90.",
":",
"pass",
"else",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"DEC must be between -90 - 90 degrees\"",
")",
"return",
"-",
"1",
"if",
"(",
"dec",
">=",
"0",
")",
":",
"hemisphere",
"=",
"'+'",
"else",
":",
"hemisphere",
"=",
"'-'",
"dec",
"*=",
"-",
"1",
"# PRECISION TEST",
"# 1s = .000277778 DEGREE",
"# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4",
"decimalLen",
"=",
"len",
"(",
"repr",
"(",
"dec",
")",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
")",
"precision",
"=",
"decimalLen",
"-",
"4",
"dec_deg",
"=",
"int",
"(",
"dec",
")",
"dec_mm",
"=",
"int",
"(",
"(",
"dec",
"-",
"dec_deg",
")",
"*",
"60",
")",
"dec_ss",
"=",
"int",
"(",
"(",
"(",
"dec",
"-",
"dec_deg",
")",
"*",
"60",
"-",
"dec_mm",
")",
"*",
"60",
")",
"dec_f",
"=",
"(",
"(",
"(",
"dec",
"-",
"dec_deg",
")",
"*",
"60",
"-",
"dec_mm",
")",
"*",
"60",
")",
"-",
"dec_ss",
"# SET PRECISION",
"dec_f",
"=",
"repr",
"(",
"dec_f",
")",
"[",
"2",
":",
"]",
"dec_f",
"=",
"dec_f",
"[",
":",
"precision",
"]",
"if",
"len",
"(",
"dec_f",
")",
":",
"dec_f",
"=",
"\".\"",
"+",
"dec_f",
"if",
"precision",
"<",
"0",
":",
"dec_f",
"=",
"\"\"",
"sexegesimal",
"=",
"hemisphere",
"+",
"'%02d'",
"%",
"dec_deg",
"+",
"delimiter",
"+",
"'%02d'",
"%",
"dec_mm",
"+",
"delimiter",
"+",
"'%02d'",
"%",
"dec_ss",
"+",
"dec_f",
"self",
".",
"log",
".",
"info",
"(",
"'completed the ``dec_decimal_to_sexegesimal`` method'",
")",
"return",
"sexegesimal"
]
| *Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8 | [
"*",
"Convert",
"a",
"declination",
"between",
"decimal",
"degrees",
"and",
"sexegesimal",
".",
"*"
]
| python | train | 29.488372 |
tensorflow/lucid | lucid/optvis/objectives.py | https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L332-L358 | def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):
"""Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective.
"""
def inner(T):
arr = T("input")
# print shp
if mask is None:
mask_ = np.ones(shp)
mask_[:, w:-w, w:-w] = 0
else:
mask_ = mask
blur = _tf_blur(arr, w=5)
diffs = (blur-arr)**2
diffs += 0.8*(arr-C)**2
return -tf.reduce_sum(diffs*mask_)
return inner | [
"def",
"penalize_boundary_complexity",
"(",
"shp",
",",
"w",
"=",
"20",
",",
"mask",
"=",
"None",
",",
"C",
"=",
"0.5",
")",
":",
"def",
"inner",
"(",
"T",
")",
":",
"arr",
"=",
"T",
"(",
"\"input\"",
")",
"# print shp",
"if",
"mask",
"is",
"None",
":",
"mask_",
"=",
"np",
".",
"ones",
"(",
"shp",
")",
"mask_",
"[",
":",
",",
"w",
":",
"-",
"w",
",",
"w",
":",
"-",
"w",
"]",
"=",
"0",
"else",
":",
"mask_",
"=",
"mask",
"blur",
"=",
"_tf_blur",
"(",
"arr",
",",
"w",
"=",
"5",
")",
"diffs",
"=",
"(",
"blur",
"-",
"arr",
")",
"**",
"2",
"diffs",
"+=",
"0.8",
"*",
"(",
"arr",
"-",
"C",
")",
"**",
"2",
"return",
"-",
"tf",
".",
"reduce_sum",
"(",
"diffs",
"*",
"mask_",
")",
"return",
"inner"
]
| Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective. | [
"Encourage",
"the",
"boundaries",
"of",
"an",
"image",
"to",
"have",
"less",
"variation",
"and",
"of",
"color",
"C",
"."
]
| python | train | 23.703704 |
bcbio/bcbio-nextgen | bcbio/structural/regions.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L98-L141 | def _calc_sizes(self, cnv_file, items):
"""Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget).
"""
bp_per_bin = 100000 # same target as CNVkit
range_map = {"target": (100, 250), "antitarget": (10000, 1000000)}
target_bps = []
anti_bps = []
checked_beds = set([])
for data in items:
region_bed = tz.get_in(["depth", "variant_regions", "regions"], data)
if region_bed and region_bed not in checked_beds:
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file):
if r.stop - r.start > range_map["target"][0]:
target_bps.append(float(r.name))
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file, v=True):
if r.stop - r.start > range_map["target"][1]:
anti_bps.append(float(r.name))
checked_beds.add(region_bed)
def scale_in_boundary(raw, round_interval, range_targets):
min_val, max_val = range_targets
out = int(math.ceil(raw / float(round_interval)) * round_interval)
if out > max_val:
return max_val
elif out < min_val:
return min_val
else:
return out
if target_bps and np.median(target_bps) > 0:
raw_target_bin = bp_per_bin / float(np.median(target_bps))
target_bin = scale_in_boundary(raw_target_bin, 50, range_map["target"])
else:
target_bin = range_map["target"][1]
if anti_bps and np.median(anti_bps) > 0:
raw_anti_bin = bp_per_bin / float(np.median(anti_bps))
anti_bin = scale_in_boundary(raw_anti_bin, 10000, range_map["antitarget"])
else:
anti_bin = range_map["antitarget"][1]
return target_bin, anti_bin | [
"def",
"_calc_sizes",
"(",
"self",
",",
"cnv_file",
",",
"items",
")",
":",
"bp_per_bin",
"=",
"100000",
"# same target as CNVkit",
"range_map",
"=",
"{",
"\"target\"",
":",
"(",
"100",
",",
"250",
")",
",",
"\"antitarget\"",
":",
"(",
"10000",
",",
"1000000",
")",
"}",
"target_bps",
"=",
"[",
"]",
"anti_bps",
"=",
"[",
"]",
"checked_beds",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"data",
"in",
"items",
":",
"region_bed",
"=",
"tz",
".",
"get_in",
"(",
"[",
"\"depth\"",
",",
"\"variant_regions\"",
",",
"\"regions\"",
"]",
",",
"data",
")",
"if",
"region_bed",
"and",
"region_bed",
"not",
"in",
"checked_beds",
":",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"region_bed",
")",
"as",
"in_handle",
":",
"for",
"r",
"in",
"pybedtools",
".",
"BedTool",
"(",
"in_handle",
")",
".",
"intersect",
"(",
"cnv_file",
")",
":",
"if",
"r",
".",
"stop",
"-",
"r",
".",
"start",
">",
"range_map",
"[",
"\"target\"",
"]",
"[",
"0",
"]",
":",
"target_bps",
".",
"append",
"(",
"float",
"(",
"r",
".",
"name",
")",
")",
"with",
"utils",
".",
"open_gzipsafe",
"(",
"region_bed",
")",
"as",
"in_handle",
":",
"for",
"r",
"in",
"pybedtools",
".",
"BedTool",
"(",
"in_handle",
")",
".",
"intersect",
"(",
"cnv_file",
",",
"v",
"=",
"True",
")",
":",
"if",
"r",
".",
"stop",
"-",
"r",
".",
"start",
">",
"range_map",
"[",
"\"target\"",
"]",
"[",
"1",
"]",
":",
"anti_bps",
".",
"append",
"(",
"float",
"(",
"r",
".",
"name",
")",
")",
"checked_beds",
".",
"add",
"(",
"region_bed",
")",
"def",
"scale_in_boundary",
"(",
"raw",
",",
"round_interval",
",",
"range_targets",
")",
":",
"min_val",
",",
"max_val",
"=",
"range_targets",
"out",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"raw",
"/",
"float",
"(",
"round_interval",
")",
")",
"*",
"round_interval",
")",
"if",
"out",
">",
"max_val",
":",
"return",
"max_val",
"elif",
"out",
"<",
"min_val",
":",
"return",
"min_val",
"else",
":",
"return",
"out",
"if",
"target_bps",
"and",
"np",
".",
"median",
"(",
"target_bps",
")",
">",
"0",
":",
"raw_target_bin",
"=",
"bp_per_bin",
"/",
"float",
"(",
"np",
".",
"median",
"(",
"target_bps",
")",
")",
"target_bin",
"=",
"scale_in_boundary",
"(",
"raw_target_bin",
",",
"50",
",",
"range_map",
"[",
"\"target\"",
"]",
")",
"else",
":",
"target_bin",
"=",
"range_map",
"[",
"\"target\"",
"]",
"[",
"1",
"]",
"if",
"anti_bps",
"and",
"np",
".",
"median",
"(",
"anti_bps",
")",
">",
"0",
":",
"raw_anti_bin",
"=",
"bp_per_bin",
"/",
"float",
"(",
"np",
".",
"median",
"(",
"anti_bps",
")",
")",
"anti_bin",
"=",
"scale_in_boundary",
"(",
"raw_anti_bin",
",",
"10000",
",",
"range_map",
"[",
"\"antitarget\"",
"]",
")",
"else",
":",
"anti_bin",
"=",
"range_map",
"[",
"\"antitarget\"",
"]",
"[",
"1",
"]",
"return",
"target_bin",
",",
"anti_bin"
]
| Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget). | [
"Retrieve",
"target",
"and",
"antitarget",
"bin",
"sizes",
"based",
"on",
"depth",
"."
]
| python | train | 48.795455 |
pycontribs/pyrax | pyrax/utils.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/utils.py#L549-L562 | def to_timestamp(val):
"""
Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value.
"""
# If we're given a number, give it right back - it's already a timestamp.
if isinstance(val, numbers.Number):
return val
elif isinstance(val, six.string_types):
dt = _parse_datetime_string(val)
else:
dt = val
return time.mktime(dt.timetuple()) | [
"def",
"to_timestamp",
"(",
"val",
")",
":",
"# If we're given a number, give it right back - it's already a timestamp.",
"if",
"isinstance",
"(",
"val",
",",
"numbers",
".",
"Number",
")",
":",
"return",
"val",
"elif",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"dt",
"=",
"_parse_datetime_string",
"(",
"val",
")",
"else",
":",
"dt",
"=",
"val",
"return",
"time",
".",
"mktime",
"(",
"dt",
".",
"timetuple",
"(",
")",
")"
]
| Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value. | [
"Takes",
"a",
"value",
"that",
"is",
"either",
"a",
"Python",
"date",
"datetime",
"or",
"a",
"string",
"representation",
"of",
"a",
"date",
"/",
"datetime",
"value",
".",
"Returns",
"a",
"standard",
"Unix",
"timestamp",
"corresponding",
"to",
"that",
"value",
"."
]
| python | train | 35.357143 |
pletzer/pnumpy | examples/exAverage2d.py | https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/examples/exAverage2d.py#L44-L65 | def setValues(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data):
"""
Set setValues
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array
"""
nxGHalf = nxG/2.
nyGHalf = nyG/2.
nxGQuart = nxGHalf/2.
nyGQuart = nyGHalf/2.
for i in range(data.shape[0]):
iG = iBeg + i
di = iG - nxG
for j in range(data.shape[1]):
jG = jBeg + j
dj = jG - 0.8*nyG
data[i, j] = numpy.floor(1.9*numpy.exp(-di**2/nxGHalf**2 - dj**2/nyGHalf**2)) | [
"def",
"setValues",
"(",
"nxG",
",",
"nyG",
",",
"iBeg",
",",
"iEnd",
",",
"jBeg",
",",
"jEnd",
",",
"data",
")",
":",
"nxGHalf",
"=",
"nxG",
"/",
"2.",
"nyGHalf",
"=",
"nyG",
"/",
"2.",
"nxGQuart",
"=",
"nxGHalf",
"/",
"2.",
"nyGQuart",
"=",
"nyGHalf",
"/",
"2.",
"for",
"i",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
":",
"iG",
"=",
"iBeg",
"+",
"i",
"di",
"=",
"iG",
"-",
"nxG",
"for",
"j",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
":",
"jG",
"=",
"jBeg",
"+",
"j",
"dj",
"=",
"jG",
"-",
"0.8",
"*",
"nyG",
"data",
"[",
"i",
",",
"j",
"]",
"=",
"numpy",
".",
"floor",
"(",
"1.9",
"*",
"numpy",
".",
"exp",
"(",
"-",
"di",
"**",
"2",
"/",
"nxGHalf",
"**",
"2",
"-",
"dj",
"**",
"2",
"/",
"nyGHalf",
"**",
"2",
")",
")"
]
| Set setValues
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array | [
"Set",
"setValues"
]
| python | train | 32.136364 |
raphaelm/python-fints | fints/client.py | https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/client.py#L290-L308 | def deconstruct(self, including_private: bool=False) -> bytes:
"""Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN.
"""
data = self._deconstruct_v1(including_private=including_private)
return compress_datablob(DATA_BLOB_MAGIC, 1, data) | [
"def",
"deconstruct",
"(",
"self",
",",
"including_private",
":",
"bool",
"=",
"False",
")",
"->",
"bytes",
":",
"data",
"=",
"self",
".",
"_deconstruct_v1",
"(",
"including_private",
"=",
"including_private",
")",
"return",
"compress_datablob",
"(",
"DATA_BLOB_MAGIC",
",",
"1",
",",
"data",
")"
]
| Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN. | [
"Return",
"state",
"of",
"this",
"FinTSClient",
"instance",
"as",
"an",
"opaque",
"datablob",
".",
"You",
"should",
"not",
"use",
"this",
"object",
"after",
"calling",
"this",
"method",
"."
]
| python | train | 60.526316 |
googleapis/google-cloud-python | logging/google/cloud/logging/_gapic.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_gapic.py#L408-L418 | def metric_delete(self, project, metric_name):
"""API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
"""
path = "projects/%s/metrics/%s" % (project, metric_name)
self._gapic_api.delete_log_metric(path) | [
"def",
"metric_delete",
"(",
"self",
",",
"project",
",",
"metric_name",
")",
":",
"path",
"=",
"\"projects/%s/metrics/%s\"",
"%",
"(",
"project",
",",
"metric_name",
")",
"self",
".",
"_gapic_api",
".",
"delete_log_metric",
"(",
"path",
")"
]
| API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric | [
"API",
"call",
":",
"delete",
"a",
"metric",
"resource",
"."
]
| python | train | 35 |
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/tools/export_default.py | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/tools/export_default.py#L85-L113 | def export_default_probes(path, module_name = '', raise_errors = False):
"""
NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files
"""
raise NotImplementedError
import b26_toolkit.b26_toolkit.instruments as instruments
from pylabcontrol.core import Probe
for name, obj in inspect.getmembers(instruments):
if inspect.isclass(obj):
try:
instrument = obj()
print(('--- created ', obj.__name__, ' -- '))
for probe_name, probe_info in instrument._PROBES.items():
probe = Probe(instrument, probe_name, info = probe_info)
filename = os.path.join(path, '{:s}.b26'.format(instrument.name))
probe.save(filename)
except:
print(('failed to create probe file for: {:s}'.format(obj.__name__)))
print(('failed to create probe file for: {:s}'.format(obj.__name__))) | [
"def",
"export_default_probes",
"(",
"path",
",",
"module_name",
"=",
"''",
",",
"raise_errors",
"=",
"False",
")",
":",
"raise",
"NotImplementedError",
"import",
"b26_toolkit",
".",
"b26_toolkit",
".",
"instruments",
"as",
"instruments",
"from",
"pylabcontrol",
".",
"core",
"import",
"Probe",
"for",
"name",
",",
"obj",
"in",
"inspect",
".",
"getmembers",
"(",
"instruments",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
":",
"try",
":",
"instrument",
"=",
"obj",
"(",
")",
"print",
"(",
"(",
"'--- created '",
",",
"obj",
".",
"__name__",
",",
"' -- '",
")",
")",
"for",
"probe_name",
",",
"probe_info",
"in",
"instrument",
".",
"_PROBES",
".",
"items",
"(",
")",
":",
"probe",
"=",
"Probe",
"(",
"instrument",
",",
"probe_name",
",",
"info",
"=",
"probe_info",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'{:s}.b26'",
".",
"format",
"(",
"instrument",
".",
"name",
")",
")",
"probe",
".",
"save",
"(",
"filename",
")",
"except",
":",
"print",
"(",
"(",
"'failed to create probe file for: {:s}'",
".",
"format",
"(",
"obj",
".",
"__name__",
")",
")",
")",
"print",
"(",
"(",
"'failed to create probe file for: {:s}'",
".",
"format",
"(",
"obj",
".",
"__name__",
")",
")",
")"
]
| NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files | [
"NOT",
"IMPLEMENTED",
"YET",
"tries",
"to",
"instantiate",
"all",
"the",
"instruments",
"that",
"are",
"imported",
"in",
"/",
"instruments",
"/",
"__init__",
".",
"py",
"and",
"the",
"probes",
"of",
"each",
"instrument",
"that",
"could",
"be",
"instantiated",
"into",
"a",
".",
"b26",
"file",
"in",
"the",
"folder",
"path",
"Args",
":",
"path",
":",
"target",
"path",
"for",
".",
"b26",
"files"
]
| python | train | 39.172414 |
axltxl/m2bk | m2bk/app.py | https://github.com/axltxl/m2bk/blob/980083dfd17e6e783753a946e9aa809714551141/m2bk/app.py#L31-L96 | def init_parsecmdline(argv=[]):
"""
Parse arguments from the command line
:param argv: list of arguments
"""
# main argument parser
parser = argparse.ArgumentParser(prog=PKG_NAME)
# --version
parser.add_argument('--version', action='version', version=version)
# -c, --config <file_name>
parser.add_argument("-c", "--config",
action="store",
dest="config_file", default=config.CONF_DEFAULT_FILE,
help="specify configuration file to use")
# --dry-run
parser.add_argument("-d", "--dry-run",
action="store_true", dest="dry_run", default=False,
help="don't actually do anything")
# --quiet
parser.add_argument("-q", "--quiet",
action="store_true", dest="log_quiet", default=False,
help="quiet output")
# --ll <level>
# logging level
parser.add_argument("--ll", "--log-level",
action="store", type=int,
dest="log_lvl", default=log.LOG_LVL_DEFAULT,
help="set logging level")
# -l, --log-file
parser.add_argument("-l", "--log-file",
action="store",
dest="log_file", default=log.LOG_FILE_DEFAULT,
help="set log file")
# Absorb the options
options = parser.parse_args(argv)
# Set whether we are going to perform a dry run
global _opt
_opt["dry_run"] = options.dry_run
# Initiate the log level
log.init(threshold_lvl=options.log_lvl,
quiet_stdout=options.log_quiet, log_file=options.log_file)
#
# Print the splash
#
_splash()
# Merge configuration with a JSON file
config_file = os.path.abspath(options.config_file)
log.msg("Attempting to use configuration file '{config_file}'"
.format(config_file=config_file))
try:
config.set_from_file(config_file)
except FileNotFoundError:
raise FileNotFoundError("Configuration file '{config_file}' not found!"
.format(config_file=config_file)) | [
"def",
"init_parsecmdline",
"(",
"argv",
"=",
"[",
"]",
")",
":",
"# main argument parser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"PKG_NAME",
")",
"# --version",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"version",
")",
"# -c, --config <file_name>",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--config\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"config_file\"",
",",
"default",
"=",
"config",
".",
"CONF_DEFAULT_FILE",
",",
"help",
"=",
"\"specify configuration file to use\"",
")",
"# --dry-run",
"parser",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--dry-run\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"dry_run\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"don't actually do anything\"",
")",
"# --quiet",
"parser",
".",
"add_argument",
"(",
"\"-q\"",
",",
"\"--quiet\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"log_quiet\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"quiet output\"",
")",
"# --ll <level>",
"# logging level",
"parser",
".",
"add_argument",
"(",
"\"--ll\"",
",",
"\"--log-level\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"dest",
"=",
"\"log_lvl\"",
",",
"default",
"=",
"log",
".",
"LOG_LVL_DEFAULT",
",",
"help",
"=",
"\"set logging level\"",
")",
"# -l, --log-file",
"parser",
".",
"add_argument",
"(",
"\"-l\"",
",",
"\"--log-file\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"log_file\"",
",",
"default",
"=",
"log",
".",
"LOG_FILE_DEFAULT",
",",
"help",
"=",
"\"set log file\"",
")",
"# Absorb the options",
"options",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"# Set whether we are going to perform a dry run",
"global",
"_opt",
"_opt",
"[",
"\"dry_run\"",
"]",
"=",
"options",
".",
"dry_run",
"# Initiate the log level",
"log",
".",
"init",
"(",
"threshold_lvl",
"=",
"options",
".",
"log_lvl",
",",
"quiet_stdout",
"=",
"options",
".",
"log_quiet",
",",
"log_file",
"=",
"options",
".",
"log_file",
")",
"#",
"# Print the splash",
"#",
"_splash",
"(",
")",
"# Merge configuration with a JSON file",
"config_file",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"options",
".",
"config_file",
")",
"log",
".",
"msg",
"(",
"\"Attempting to use configuration file '{config_file}'\"",
".",
"format",
"(",
"config_file",
"=",
"config_file",
")",
")",
"try",
":",
"config",
".",
"set_from_file",
"(",
"config_file",
")",
"except",
"FileNotFoundError",
":",
"raise",
"FileNotFoundError",
"(",
"\"Configuration file '{config_file}' not found!\"",
".",
"format",
"(",
"config_file",
"=",
"config_file",
")",
")"
]
| Parse arguments from the command line
:param argv: list of arguments | [
"Parse",
"arguments",
"from",
"the",
"command",
"line"
]
| python | train | 32.515152 |
mwouts/jupytext | jupytext/magics.py | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/magics.py#L46-L55 | def comment_magic(source, language='python', global_escape_flag=True):
"""Escape Jupyter magics with '# '"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (next_is_magic or is_magic(line, language, global_escape_flag)):
source[pos] = _COMMENT[language] + ' ' + line
next_is_magic = language == 'python' and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source | [
"def",
"comment_magic",
"(",
"source",
",",
"language",
"=",
"'python'",
",",
"global_escape_flag",
"=",
"True",
")",
":",
"parser",
"=",
"StringParser",
"(",
"language",
")",
"next_is_magic",
"=",
"False",
"for",
"pos",
",",
"line",
"in",
"enumerate",
"(",
"source",
")",
":",
"if",
"not",
"parser",
".",
"is_quoted",
"(",
")",
"and",
"(",
"next_is_magic",
"or",
"is_magic",
"(",
"line",
",",
"language",
",",
"global_escape_flag",
")",
")",
":",
"source",
"[",
"pos",
"]",
"=",
"_COMMENT",
"[",
"language",
"]",
"+",
"' '",
"+",
"line",
"next_is_magic",
"=",
"language",
"==",
"'python'",
"and",
"_LINE_CONTINUATION_RE",
".",
"match",
"(",
"line",
")",
"parser",
".",
"read_line",
"(",
"line",
")",
"return",
"source"
]
| Escape Jupyter magics with '# | [
"Escape",
"Jupyter",
"magics",
"with",
"#"
]
| python | train | 50.2 |
dossier/dossier.store | dossier/store/elastic.py | https://github.com/dossier/dossier.store/blob/b22ffe2470bba9fcc98a30cb55b437bfa1521e7f/dossier/store/elastic.py#L189-L228 | def put(self, items, indexes=True):
'''Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
'''
actions = []
for cid, fc in items:
# TODO: If we store features in a columnar order, then we
# could tell ES to index the feature values directly. ---AG
# (But is problematic because we want to preserve the ability
# to selectively index FCs. So we'd probably need two distinct
# doc types.)
idxs = defaultdict(list)
if indexes:
for fname in self.indexed_features:
if fname in fc:
idxs[fname_to_idx_name(fname)].extend(fc[fname])
for fname in self.fulltext_indexed_features:
if fname not in fc:
continue
if isinstance(fc[fname], basestring):
idxs[fname_to_full_idx_name(fname)] = fc[fname]
else:
idxs[fname_to_full_idx_name(fname)].extend(fc[fname])
actions.append({
'_index': self.index,
'_type': self.type,
'_id': eid(cid),
'_op_type': 'index',
'_source': dict(idxs, **{
'fc': self.fc_to_dict(fc),
}),
})
bulk(self.conn, actions, timeout=60, request_timeout=60) | [
"def",
"put",
"(",
"self",
",",
"items",
",",
"indexes",
"=",
"True",
")",
":",
"actions",
"=",
"[",
"]",
"for",
"cid",
",",
"fc",
"in",
"items",
":",
"# TODO: If we store features in a columnar order, then we",
"# could tell ES to index the feature values directly. ---AG",
"# (But is problematic because we want to preserve the ability",
"# to selectively index FCs. So we'd probably need two distinct",
"# doc types.)",
"idxs",
"=",
"defaultdict",
"(",
"list",
")",
"if",
"indexes",
":",
"for",
"fname",
"in",
"self",
".",
"indexed_features",
":",
"if",
"fname",
"in",
"fc",
":",
"idxs",
"[",
"fname_to_idx_name",
"(",
"fname",
")",
"]",
".",
"extend",
"(",
"fc",
"[",
"fname",
"]",
")",
"for",
"fname",
"in",
"self",
".",
"fulltext_indexed_features",
":",
"if",
"fname",
"not",
"in",
"fc",
":",
"continue",
"if",
"isinstance",
"(",
"fc",
"[",
"fname",
"]",
",",
"basestring",
")",
":",
"idxs",
"[",
"fname_to_full_idx_name",
"(",
"fname",
")",
"]",
"=",
"fc",
"[",
"fname",
"]",
"else",
":",
"idxs",
"[",
"fname_to_full_idx_name",
"(",
"fname",
")",
"]",
".",
"extend",
"(",
"fc",
"[",
"fname",
"]",
")",
"actions",
".",
"append",
"(",
"{",
"'_index'",
":",
"self",
".",
"index",
",",
"'_type'",
":",
"self",
".",
"type",
",",
"'_id'",
":",
"eid",
"(",
"cid",
")",
",",
"'_op_type'",
":",
"'index'",
",",
"'_source'",
":",
"dict",
"(",
"idxs",
",",
"*",
"*",
"{",
"'fc'",
":",
"self",
".",
"fc_to_dict",
"(",
"fc",
")",
",",
"}",
")",
",",
"}",
")",
"bulk",
"(",
"self",
".",
"conn",
",",
"actions",
",",
"timeout",
"=",
"60",
",",
"request_timeout",
"=",
"60",
")"
]
| Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed. | [
"Adds",
"feature",
"collections",
"to",
"the",
"store",
"."
]
| python | test | 43.425 |
playpauseandstop/Flask-And-Redis | flask_redis.py | https://github.com/playpauseandstop/Flask-And-Redis/blob/878bb193ae4a8c3497f6f1ff1511a3b8c96d08b5/flask_redis.py#L172-L189 | def _build_connection_args(self, klass):
"""Read connection args spec, exclude self from list of possible
:param klass: Redis connection class.
"""
bases = [base for base in klass.__bases__ if base is not object]
all_args = []
for cls in [klass] + bases:
try:
args = inspect.getfullargspec(cls.__init__).args
except AttributeError:
args = inspect.getargspec(cls.__init__).args
for arg in args:
if arg in all_args:
continue
all_args.append(arg)
all_args.remove('self')
return all_args | [
"def",
"_build_connection_args",
"(",
"self",
",",
"klass",
")",
":",
"bases",
"=",
"[",
"base",
"for",
"base",
"in",
"klass",
".",
"__bases__",
"if",
"base",
"is",
"not",
"object",
"]",
"all_args",
"=",
"[",
"]",
"for",
"cls",
"in",
"[",
"klass",
"]",
"+",
"bases",
":",
"try",
":",
"args",
"=",
"inspect",
".",
"getfullargspec",
"(",
"cls",
".",
"__init__",
")",
".",
"args",
"except",
"AttributeError",
":",
"args",
"=",
"inspect",
".",
"getargspec",
"(",
"cls",
".",
"__init__",
")",
".",
"args",
"for",
"arg",
"in",
"args",
":",
"if",
"arg",
"in",
"all_args",
":",
"continue",
"all_args",
".",
"append",
"(",
"arg",
")",
"all_args",
".",
"remove",
"(",
"'self'",
")",
"return",
"all_args"
]
| Read connection args spec, exclude self from list of possible
:param klass: Redis connection class. | [
"Read",
"connection",
"args",
"spec",
"exclude",
"self",
"from",
"list",
"of",
"possible"
]
| python | train | 36.166667 |
broox/python-nuheat | nuheat/thermostat.py | https://github.com/broox/python-nuheat/blob/3a18852dc9465c34cb96eb3a0c84f1a6caa70707/nuheat/thermostat.py#L205-L226 | def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD):
"""
Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param permanent: Permanently hold the temperature. If set to False, the schedule will
resume at the next programmed event
"""
if temperature < self.min_temperature:
temperature = self.min_temperature
if temperature > self.max_temperature:
temperature = self.max_temperature
modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]
if mode not in modes:
raise Exception("Invalid mode. Please use one of: {}".format(modes))
self.set_data({
"SetPointTemp": temperature,
"ScheduleMode": mode
}) | [
"def",
"set_target_temperature",
"(",
"self",
",",
"temperature",
",",
"mode",
"=",
"config",
".",
"SCHEDULE_HOLD",
")",
":",
"if",
"temperature",
"<",
"self",
".",
"min_temperature",
":",
"temperature",
"=",
"self",
".",
"min_temperature",
"if",
"temperature",
">",
"self",
".",
"max_temperature",
":",
"temperature",
"=",
"self",
".",
"max_temperature",
"modes",
"=",
"[",
"config",
".",
"SCHEDULE_TEMPORARY_HOLD",
",",
"config",
".",
"SCHEDULE_HOLD",
"]",
"if",
"mode",
"not",
"in",
"modes",
":",
"raise",
"Exception",
"(",
"\"Invalid mode. Please use one of: {}\"",
".",
"format",
"(",
"modes",
")",
")",
"self",
".",
"set_data",
"(",
"{",
"\"SetPointTemp\"",
":",
"temperature",
",",
"\"ScheduleMode\"",
":",
"mode",
"}",
")"
]
| Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param permanent: Permanently hold the temperature. If set to False, the schedule will
resume at the next programmed event | [
"Updates",
"the",
"target",
"temperature",
"on",
"the",
"NuHeat",
"API"
]
| python | valid | 38.272727 |
dhermes/bezier | docs/make_images.py | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/docs/make_images.py#L381-L419 | def surface_evaluate_cartesian_multi(surface, points):
"""Image for :meth`.Surface.evaluate_cartesian_multi` docstring."""
if NO_IMAGES:
return
ax = surface.plot(256)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0],
r"$w_0$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 1] + 2 * delta,
points[1, 1],
r"$w_1$",
fontsize=font_size,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
points[0, 2],
points[1, 2] + delta,
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.375)
ax.set_ylim(-0.25, 2.125)
save_image(ax.figure, "surface_evaluate_cartesian_multi.png") | [
"def",
"surface_evaluate_cartesian_multi",
"(",
"surface",
",",
"points",
")",
":",
"if",
"NO_IMAGES",
":",
"return",
"ax",
"=",
"surface",
".",
"plot",
"(",
"256",
")",
"ax",
".",
"plot",
"(",
"points",
"[",
"0",
",",
":",
"]",
",",
"points",
"[",
"1",
",",
":",
"]",
",",
"color",
"=",
"\"black\"",
",",
"linestyle",
"=",
"\"None\"",
",",
"marker",
"=",
"\"o\"",
")",
"delta",
"=",
"1.0",
"/",
"32.0",
"font_size",
"=",
"18",
"ax",
".",
"text",
"(",
"points",
"[",
"0",
",",
"0",
"]",
",",
"points",
"[",
"1",
",",
"0",
"]",
",",
"r\"$w_0$\"",
",",
"fontsize",
"=",
"font_size",
",",
"verticalalignment",
"=",
"\"top\"",
",",
"horizontalalignment",
"=",
"\"right\"",
",",
")",
"ax",
".",
"text",
"(",
"points",
"[",
"0",
",",
"1",
"]",
"+",
"2",
"*",
"delta",
",",
"points",
"[",
"1",
",",
"1",
"]",
",",
"r\"$w_1$\"",
",",
"fontsize",
"=",
"font_size",
",",
"verticalalignment",
"=",
"\"center\"",
",",
"horizontalalignment",
"=",
"\"left\"",
",",
")",
"ax",
".",
"text",
"(",
"points",
"[",
"0",
",",
"2",
"]",
",",
"points",
"[",
"1",
",",
"2",
"]",
"+",
"delta",
",",
"r\"$w_2$\"",
",",
"fontsize",
"=",
"font_size",
",",
"verticalalignment",
"=",
"\"bottom\"",
",",
"horizontalalignment",
"=",
"\"left\"",
",",
")",
"ax",
".",
"axis",
"(",
"\"scaled\"",
")",
"ax",
".",
"set_xlim",
"(",
"-",
"3.125",
",",
"2.375",
")",
"ax",
".",
"set_ylim",
"(",
"-",
"0.25",
",",
"2.125",
")",
"save_image",
"(",
"ax",
".",
"figure",
",",
"\"surface_evaluate_cartesian_multi.png\"",
")"
]
| Image for :meth`.Surface.evaluate_cartesian_multi` docstring. | [
"Image",
"for",
":",
"meth",
".",
"Surface",
".",
"evaluate_cartesian_multi",
"docstring",
"."
]
| python | train | 25.641026 |
fossasia/knittingpattern | knittingpattern/convert/Layout.py | https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/Layout.py#L246-L249 | def _row_should_be_placed(self, row, position):
""":return: whether to place this instruction"""
placed_row = self._rows_in_grid.get(row)
return placed_row is None or placed_row.y < position.y | [
"def",
"_row_should_be_placed",
"(",
"self",
",",
"row",
",",
"position",
")",
":",
"placed_row",
"=",
"self",
".",
"_rows_in_grid",
".",
"get",
"(",
"row",
")",
"return",
"placed_row",
"is",
"None",
"or",
"placed_row",
".",
"y",
"<",
"position",
".",
"y"
]
| :return: whether to place this instruction | [
":",
"return",
":",
"whether",
"to",
"place",
"this",
"instruction"
]
| python | valid | 53.25 |
markovmodel/msmtools | msmtools/analysis/dense/fingerprints.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/fingerprints.py#L175-L202 | def correlation(P, obs1, obs2=None, times=[1], k=None):
r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
M = P.shape[0]
T = np.asarray(times).max()
if T < M:
return correlation_matvec(P, obs1, obs2=obs2, times=times)
else:
return correlation_decomp(P, obs1, obs2=obs2, times=times, k=k) | [
"def",
"correlation",
"(",
"P",
",",
"obs1",
",",
"obs2",
"=",
"None",
",",
"times",
"=",
"[",
"1",
"]",
",",
"k",
"=",
"None",
")",
":",
"M",
"=",
"P",
".",
"shape",
"[",
"0",
"]",
"T",
"=",
"np",
".",
"asarray",
"(",
"times",
")",
".",
"max",
"(",
")",
"if",
"T",
"<",
"M",
":",
"return",
"correlation_matvec",
"(",
"P",
",",
"obs1",
",",
"obs2",
"=",
"obs2",
",",
"times",
"=",
"times",
")",
"else",
":",
"return",
"correlation_decomp",
"(",
"P",
",",
"obs1",
",",
"obs2",
"=",
"obs2",
",",
"times",
"=",
"times",
",",
"k",
"=",
"k",
")"
]
| r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times | [
"r",
"Time",
"-",
"correlation",
"for",
"equilibrium",
"experiment",
"."
]
| python | train | 29.821429 |
bitesofcode/projexui | projexui/widgets/xorbquerywidget/xorbquickfilterwidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbquerywidget/xorbquickfilterwidget.py#L83-L94 | def query(self):
"""
Builds the query for this quick filter.
:return <orb.Query>
"""
output = Query()
for column, op, plugin, editor in self._plugins:
query = Query(column)
if plugin.setupQuery(query, op, editor):
output &= query
return output | [
"def",
"query",
"(",
"self",
")",
":",
"output",
"=",
"Query",
"(",
")",
"for",
"column",
",",
"op",
",",
"plugin",
",",
"editor",
"in",
"self",
".",
"_plugins",
":",
"query",
"=",
"Query",
"(",
"column",
")",
"if",
"plugin",
".",
"setupQuery",
"(",
"query",
",",
"op",
",",
"editor",
")",
":",
"output",
"&=",
"query",
"return",
"output"
]
| Builds the query for this quick filter.
:return <orb.Query> | [
"Builds",
"the",
"query",
"for",
"this",
"quick",
"filter",
".",
":",
"return",
"<orb",
".",
"Query",
">"
]
| python | train | 29.333333 |
PolyJIT/benchbuild | benchbuild/reports/__init__.py | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/reports/__init__.py#L17-L41 | def discover():
"""
Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw
"""
if CFG["plugins"]["autoload"]:
report_plugins = CFG["plugins"]["reports"].value
for plugin in report_plugins:
try:
importlib.import_module(plugin)
LOG.debug("Found report: %s", plugin)
except ImportError:
LOG.error("Could not find '%s'", plugin) | [
"def",
"discover",
"(",
")",
":",
"if",
"CFG",
"[",
"\"plugins\"",
"]",
"[",
"\"autoload\"",
"]",
":",
"report_plugins",
"=",
"CFG",
"[",
"\"plugins\"",
"]",
"[",
"\"reports\"",
"]",
".",
"value",
"for",
"plugin",
"in",
"report_plugins",
":",
"try",
":",
"importlib",
".",
"import_module",
"(",
"plugin",
")",
"LOG",
".",
"debug",
"(",
"\"Found report: %s\"",
",",
"plugin",
")",
"except",
"ImportError",
":",
"LOG",
".",
"error",
"(",
"\"Could not find '%s'\"",
",",
"plugin",
")"
]
| Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw | [
"Import",
"all",
"experiments",
"listed",
"in",
"*",
"_PLUGINS_REPORTS",
"."
]
| python | train | 36.76 |
rdo-management/python-rdomanager-oscplugin | rdomanager_oscplugin/utils.py | https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L361-L368 | def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command) | [
"def",
"remove_known_hosts",
"(",
"overcloud_ip",
")",
":",
"known_hosts",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh/known_hosts\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"known_hosts",
")",
":",
"command",
"=",
"[",
"'ssh-keygen'",
",",
"'-R'",
",",
"overcloud_ip",
",",
"'-f'",
",",
"known_hosts",
"]",
"subprocess",
".",
"check_call",
"(",
"command",
")"
]
| For a given IP address remove SSH keys from the known_hosts file | [
"For",
"a",
"given",
"IP",
"address",
"remove",
"SSH",
"keys",
"from",
"the",
"known_hosts",
"file"
]
| python | train | 39.125 |
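
A minimal usage sketch of the cleanup the record above performs; the IP address is a hypothetical placeholder (TEST-NET-1 documentation range), and the call is guarded so it only runs where ssh-keygen is available:

import os
import shutil
import subprocess

overcloud_ip = "192.0.2.10"  # hypothetical address, for illustration only
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
# Passing the command as an argument list avoids shell quoting/injection issues.
if os.path.exists(known_hosts) and shutil.which("ssh-keygen"):
    subprocess.check_call(["ssh-keygen", "-R", overcloud_ip, "-f", known_hosts])
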
secdev/scapy | scapy/layers/ntp.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/ntp.py#L212-L220 | def haslayer(self, cls):
"""Specific: NTPHeader().haslayer(NTP) should return True."""
if cls == "NTP":
if isinstance(self, NTP):
return True
elif issubtype(cls, NTP):
if isinstance(self, cls):
return True
return super(NTP, self).haslayer(cls) | [
"def",
"haslayer",
"(",
"self",
",",
"cls",
")",
":",
"if",
"cls",
"==",
"\"NTP\"",
":",
"if",
"isinstance",
"(",
"self",
",",
"NTP",
")",
":",
"return",
"True",
"elif",
"issubtype",
"(",
"cls",
",",
"NTP",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"cls",
")",
":",
"return",
"True",
"return",
"super",
"(",
"NTP",
",",
"self",
")",
".",
"haslayer",
"(",
"cls",
")"
]
| Specific: NTPHeader().haslayer(NTP) should return True. | [
"Specific",
":",
"NTPHeader",
"()",
".",
"haslayer",
"(",
"NTP",
")",
"should",
"return",
"True",
"."
]
| python | train | 35.888889 |
codelv/enaml-native | src/enamlnative/android/android_wifi.py | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_wifi.py#L401-L417 | def request_permission(cls, permissions):
""" Requests permission and returns an future result that returns a
boolean indicating if all the given permission were granted or denied.
"""
app = AndroidApplication.instance()
f = app.create_future()
def on_result(perms):
allowed = True
for p in permissions:
allowed = allowed and perms.get(p, False)
f.set_result(allowed)
app.request_permissions(permissions).then(on_result)
return f | [
"def",
"request_permission",
"(",
"cls",
",",
"permissions",
")",
":",
"app",
"=",
"AndroidApplication",
".",
"instance",
"(",
")",
"f",
"=",
"app",
".",
"create_future",
"(",
")",
"def",
"on_result",
"(",
"perms",
")",
":",
"allowed",
"=",
"True",
"for",
"p",
"in",
"permissions",
":",
"allowed",
"=",
"allowed",
"and",
"perms",
".",
"get",
"(",
"p",
",",
"False",
")",
"f",
".",
"set_result",
"(",
"allowed",
")",
"app",
".",
"request_permissions",
"(",
"permissions",
")",
".",
"then",
"(",
"on_result",
")",
"return",
"f"
]
| Requests permission and returns a future result that returns a
boolean indicating if all the given permission were granted or denied. | [
"Requests",
"permission",
"and",
"returns",
"an",
"future",
"result",
"that",
"returns",
"a",
"boolean",
"indicating",
"if",
"all",
"the",
"given",
"permission",
"were",
"granted",
"or",
"denied",
"."
]
| python | train | 31.941176 |
Arubacloud/pyArubaCloud | ArubaCloud/base/vm.py | https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/base/vm.py#L6-L24 | def find(self, name):
"""
Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
        @param name (Obj): the vm object that represents the virtual
        machine (can be Pro or Smart)
        @return (list): the subset containing the search result.
"""
if name.__class__ is 'base.Server.Pro' or name.__class__ is 'base.Server.Smart':
# print('DEBUG: matched VM object %s' % name.__class__)
pattern = name.vm_name
else:
# print('DEBUG: matched Str Object %s' % name.__class__)
pattern = name
        # 14/06/2013: since this method is called within a thread and I want to pass the return objects with queue or
# call back, I will allocate a list inside the Interface class object itself, which contain all of the vm found
# 02/11/2015: this must be changed ASAP! it's a mess this way... what was I thinking??
self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
return self.last_search_result | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
".",
"__class__",
"is",
"'base.Server.Pro'",
"or",
"name",
".",
"__class__",
"is",
"'base.Server.Smart'",
":",
"# print('DEBUG: matched VM object %s' % name.__class__)",
"pattern",
"=",
"name",
".",
"vm_name",
"else",
":",
"# print('DEBUG: matched Str Object %s' % name.__class__)",
"pattern",
"=",
"name",
"# 14/06/2013: since this method is called within a thread and I wont to pass the return objects with queue or",
"# call back, I will allocate a list inside the Interface class object itself, which contain all of the vm found",
"# 02/11/2015: this must be changed ASAP! it's a mess this way... what was I thinking??",
"self",
".",
"last_search_result",
"=",
"[",
"vm",
"for",
"vm",
"in",
"self",
"if",
"pattern",
"in",
"vm",
".",
"vm_name",
"]",
"return",
"self",
".",
"last_search_result"
]
| Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represents the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the search result.
"Return",
"a",
"list",
"of",
"subset",
"of",
"VM",
"that",
"match",
"the",
"pattern",
"name"
]
| python | train | 57.789474 |
kdeldycke/maildir-deduplicate | maildir_deduplicate/mail.py | https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/maildir_deduplicate/mail.py#L99-L114 | def body_lines(self):
""" Return a normalized list of lines from message's body. """
if not self.message.is_multipart():
body = self.message.get_payload(None, decode=True)
else:
_, _, body = self.message.as_string().partition("\n\n")
if isinstance(body, bytes):
for enc in ['ascii', 'utf-8']:
try:
body = body.decode(enc)
break
except UnicodeDecodeError:
continue
else:
body = self.message.get_payload(None, decode=False)
return body.splitlines(True) | [
"def",
"body_lines",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"message",
".",
"is_multipart",
"(",
")",
":",
"body",
"=",
"self",
".",
"message",
".",
"get_payload",
"(",
"None",
",",
"decode",
"=",
"True",
")",
"else",
":",
"_",
",",
"_",
",",
"body",
"=",
"self",
".",
"message",
".",
"as_string",
"(",
")",
".",
"partition",
"(",
"\"\\n\\n\"",
")",
"if",
"isinstance",
"(",
"body",
",",
"bytes",
")",
":",
"for",
"enc",
"in",
"[",
"'ascii'",
",",
"'utf-8'",
"]",
":",
"try",
":",
"body",
"=",
"body",
".",
"decode",
"(",
"enc",
")",
"break",
"except",
"UnicodeDecodeError",
":",
"continue",
"else",
":",
"body",
"=",
"self",
".",
"message",
".",
"get_payload",
"(",
"None",
",",
"decode",
"=",
"False",
")",
"return",
"body",
".",
"splitlines",
"(",
"True",
")"
]
| Return a normalized list of lines from message's body. | [
"Return",
"a",
"normalized",
"list",
"of",
"lines",
"from",
"message",
"s",
"body",
"."
]
| python | train | 39.4375 |
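
A self-contained sketch of the non-multipart branch above, using only the standard library; the raw message is made up for illustration:

import email

raw = "Subject: demo\nContent-Type: text/plain\n\nline one\nline two\n"
msg = email.message_from_string(raw)
assert not msg.is_multipart()
body = msg.get_payload(None, decode=True)  # bytes for a single-part message
print(body.decode("ascii").splitlines(True))  # ['line one\n', 'line two\n']
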
streamlink/streamlink | src/streamlink_cli/main.py | https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink_cli/main.py#L718-L760 | def setup_http_session():
"""Sets the global HTTP settings, such as proxy and headers."""
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
if args.http_cookies:
streamlink.set_option("http-cookies", args.http_cookies)
if args.http_headers:
streamlink.set_option("http-headers", args.http_headers)
if args.http_query_params:
streamlink.set_option("http-query-params", args.http_query_params) | [
"def",
"setup_http_session",
"(",
")",
":",
"if",
"args",
".",
"http_proxy",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-proxy\"",
",",
"args",
".",
"http_proxy",
")",
"if",
"args",
".",
"https_proxy",
":",
"streamlink",
".",
"set_option",
"(",
"\"https-proxy\"",
",",
"args",
".",
"https_proxy",
")",
"if",
"args",
".",
"http_cookie",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-cookies\"",
",",
"dict",
"(",
"args",
".",
"http_cookie",
")",
")",
"if",
"args",
".",
"http_header",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-headers\"",
",",
"dict",
"(",
"args",
".",
"http_header",
")",
")",
"if",
"args",
".",
"http_query_param",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-query-params\"",
",",
"dict",
"(",
"args",
".",
"http_query_param",
")",
")",
"if",
"args",
".",
"http_ignore_env",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-trust-env\"",
",",
"False",
")",
"if",
"args",
".",
"http_no_ssl_verify",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-ssl-verify\"",
",",
"False",
")",
"if",
"args",
".",
"http_disable_dh",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-disable-dh\"",
",",
"True",
")",
"if",
"args",
".",
"http_ssl_cert",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-ssl-cert\"",
",",
"args",
".",
"http_ssl_cert",
")",
"if",
"args",
".",
"http_ssl_cert_crt_key",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-ssl-cert\"",
",",
"tuple",
"(",
"args",
".",
"http_ssl_cert_crt_key",
")",
")",
"if",
"args",
".",
"http_timeout",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-timeout\"",
",",
"args",
".",
"http_timeout",
")",
"if",
"args",
".",
"http_cookies",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-cookies\"",
",",
"args",
".",
"http_cookies",
")",
"if",
"args",
".",
"http_headers",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-headers\"",
",",
"args",
".",
"http_headers",
")",
"if",
"args",
".",
"http_query_params",
":",
"streamlink",
".",
"set_option",
"(",
"\"http-query-params\"",
",",
"args",
".",
"http_query_params",
")"
]
| Sets the global HTTP settings, such as proxy and headers. | [
"Sets",
"the",
"global",
"HTTP",
"settings",
"such",
"as",
"proxy",
"and",
"headers",
"."
]
| python | test | 32.162791 |
AtteqCom/zsl | src/zsl/utils/email_helper.py | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/email_helper.py#L16-L63 | def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
"""Sends an email.
    :param sender: Sender as a string, or None to use the default from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration
"""
smtp_config = config['SMTP']
# Receivers must be an array.
if not isinstance(receivers, list) and not isinstance(receivers, tuple):
receivers = [receivers]
# Create the messages
msgs = []
if text is not None:
msgs.append(MIMEText(text, 'plain', charset))
if html is not None:
msgs.append(MIMEText(html, 'html', charset))
if len(msgs) == 0:
raise Exception("No message is given.")
if len(msgs) == 1:
msg = msgs[0]
else:
msg = MIMEMultipart()
for m in msgs:
msg.attach(m)
# Default sender.
if sender is None:
sender = smtp_config['SENDER']
# Fill the info.
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ", ".join(receivers)
# Send.
smtp_server = smtplib.SMTP(**(smtp_config['SERVER']))
smtp_server.sendmail(sender, receivers, msg.as_string())
smtp_server.quit() | [
"def",
"send_email",
"(",
"sender",
",",
"receivers",
",",
"subject",
",",
"text",
"=",
"None",
",",
"html",
"=",
"None",
",",
"charset",
"=",
"'utf-8'",
",",
"config",
"=",
"Injected",
")",
":",
"smtp_config",
"=",
"config",
"[",
"'SMTP'",
"]",
"# Receivers must be an array.",
"if",
"not",
"isinstance",
"(",
"receivers",
",",
"list",
")",
"and",
"not",
"isinstance",
"(",
"receivers",
",",
"tuple",
")",
":",
"receivers",
"=",
"[",
"receivers",
"]",
"# Create the messages",
"msgs",
"=",
"[",
"]",
"if",
"text",
"is",
"not",
"None",
":",
"msgs",
".",
"append",
"(",
"MIMEText",
"(",
"text",
",",
"'plain'",
",",
"charset",
")",
")",
"if",
"html",
"is",
"not",
"None",
":",
"msgs",
".",
"append",
"(",
"MIMEText",
"(",
"html",
",",
"'html'",
",",
"charset",
")",
")",
"if",
"len",
"(",
"msgs",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"\"No message is given.\"",
")",
"if",
"len",
"(",
"msgs",
")",
"==",
"1",
":",
"msg",
"=",
"msgs",
"[",
"0",
"]",
"else",
":",
"msg",
"=",
"MIMEMultipart",
"(",
")",
"for",
"m",
"in",
"msgs",
":",
"msg",
".",
"attach",
"(",
"m",
")",
"# Default sender.",
"if",
"sender",
"is",
"None",
":",
"sender",
"=",
"smtp_config",
"[",
"'SENDER'",
"]",
"# Fill the info.",
"msg",
"[",
"'Subject'",
"]",
"=",
"subject",
"msg",
"[",
"'From'",
"]",
"=",
"sender",
"msg",
"[",
"'To'",
"]",
"=",
"\", \"",
".",
"join",
"(",
"receivers",
")",
"# Send.",
"smtp_server",
"=",
"smtplib",
".",
"SMTP",
"(",
"*",
"*",
"(",
"smtp_config",
"[",
"'SERVER'",
"]",
")",
")",
"smtp_server",
".",
"sendmail",
"(",
"sender",
",",
"receivers",
",",
"msg",
".",
"as_string",
"(",
")",
")",
"smtp_server",
".",
"quit",
"(",
")"
]
| Sends an email.
:param sender: Sender as a string, or None to use the default from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration | [
"Sends",
"an",
"email",
"."
]
| python | train | 27.354167 |
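
A sketch of the message-assembly half of the function above (no SMTP connection is opened); the addresses are placeholders:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()  # the function wraps parts this way when both text and html are given
msg.attach(MIMEText("plain body", "plain", "utf-8"))
msg.attach(MIMEText("<b>html body</b>", "html", "utf-8"))
msg["Subject"] = "demo"
msg["From"] = "sender@example.com"
msg["To"] = ", ".join(["a@example.com", "b@example.com"])
print(msg.as_string())
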
twisted/epsilon | epsilon/extime.py | https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/extime.py#L393-L568 | def fromISO8601TimeAndDate(klass, iso8601string, tzinfo=None):
"""Return a new Time instance from a string formated as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work.
"""
def calculateTimezone():
if groups['zulu'] == 'Z':
return FixedOffset(0, 0)
else:
tzhour = groups.pop('tzhour')
tzmin = groups.pop('tzmin')
if tzhour is not None:
return FixedOffset(int(tzhour), int(tzmin or 0))
return tzinfo or FixedOffset(0, 0)
def coerceGroups():
groups['month'] = groups['month1'] or groups['month2']
groups['week'] = groups['week1'] or groups['week2']
# don't include fractional seconds, because it's not an integer.
defaultTo0 = ['hour', 'minute', 'second']
defaultTo1 = ['month', 'day', 'week', 'weekday', 'dayofyear']
if groups['fractionalsec'] is None:
groups['fractionalsec'] = '0'
for key in defaultTo0:
if groups[key] is None:
groups[key] = 0
for key in defaultTo1:
if groups[key] is None:
groups[key] = 1
groups['fractionalsec'] = float('.'+groups['fractionalsec'])
for key in defaultTo0 + defaultTo1 + ['year']:
groups[key] = int(groups[key])
for group, min, max in [
# some years have only 52 weeks
('week', 1, 53),
('weekday', 1, 7),
('month', 1, 12),
('day', 1, 31),
('hour', 0, 24),
('minute', 0, 59),
# Sometime in the 22nd century AD, two leap seconds will be
# required every year. In the 25th century AD, four every
# year. We'll ignore that for now though because it would be
# tricky to get right and we certainly don't need it for our
# target applications. In other words, post-singularity
# Martian users, please do not rely on this code for
# compatibility with Greater Galactic Protectorate of Earth
# date/time formatting! Apologies, but no library I know of in
# Python is sufficient for processing their dates and times
# without ADA bindings to get the radiation-safety zone counter
# correct. -glyph
('second', 0, 61),
# don't forget leap years
('dayofyear', 1, 366)]:
if not min <= groups[group] <= max:
raise ValueError, '%s must be in %i..%i' % (group, min, max)
def determineResolution():
if match.group('fractionalsec') is not None:
return max(datetime.timedelta.resolution,
datetime.timedelta(
microseconds=1 * 10 ** -len(
match.group('fractionalsec')) * 1000000))
for testGroup, resolution in [
('second', datetime.timedelta(seconds=1)),
('minute', datetime.timedelta(minutes=1)),
('hour', datetime.timedelta(hours=1)),
('weekday', datetime.timedelta(days=1)),
('dayofyear', datetime.timedelta(days=1)),
('day', datetime.timedelta(days=1)),
('week1', datetime.timedelta(weeks=1)),
('week2', datetime.timedelta(weeks=1))]:
if match.group(testGroup) is not None:
return resolution
if match.group('month1') is not None \
or match.group('month2') is not None:
if self._time.month == 12:
return datetime.timedelta(days=31)
nextMonth = self._time.replace(month=self._time.month+1)
return nextMonth - self._time
else:
nextYear = self._time.replace(year=self._time.year+1)
return nextYear - self._time
def calculateDtime(tzinfo):
"""Calculate a datetime for the start of the addressed period."""
if match.group('week1') is not None \
or match.group('week2') is not None:
if not 0 < groups['week'] <= 53:
raise ValueError(
'week must be in 1..53 (was %i)' % (groups['week'],))
dtime = datetime.datetime(
groups['year'],
1,
4,
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
dtime -= datetime.timedelta(days = dtime.weekday())
dtime += datetime.timedelta(
days = (groups['week']-1) * 7 + groups['weekday'] - 1)
if dtime.isocalendar() != (
groups['year'], groups['week'], groups['weekday']):
# actually the problem could be an error in my logic, but
# nothing should cause this but requesting week 53 of a
# year with 52 weeks.
raise ValueError('year %04i has no week %02i' %
(groups['year'], groups['week']))
return dtime
if match.group('dayofyear') is not None:
dtime = datetime.datetime(
groups['year'],
1,
1,
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
dtime += datetime.timedelta(days=groups['dayofyear']-1)
if dtime.year != groups['year']:
raise ValueError(
'year %04i has no day of year %03i' %
(groups['year'], groups['dayofyear']))
return dtime
else:
return datetime.datetime(
groups['year'],
groups['month'],
groups['day'],
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
match = klass.iso8601pattern.match(iso8601string)
if match is None:
raise ValueError(
'%r could not be parsed as an ISO 8601 date and time' %
(iso8601string,))
groups = match.groupdict()
coerceGroups()
if match.group('hour') is not None:
timezone = calculateTimezone()
else:
timezone = None
self = klass.fromDatetime(calculateDtime(timezone))
self.resolution = determineResolution()
return self | [
"def",
"fromISO8601TimeAndDate",
"(",
"klass",
",",
"iso8601string",
",",
"tzinfo",
"=",
"None",
")",
":",
"def",
"calculateTimezone",
"(",
")",
":",
"if",
"groups",
"[",
"'zulu'",
"]",
"==",
"'Z'",
":",
"return",
"FixedOffset",
"(",
"0",
",",
"0",
")",
"else",
":",
"tzhour",
"=",
"groups",
".",
"pop",
"(",
"'tzhour'",
")",
"tzmin",
"=",
"groups",
".",
"pop",
"(",
"'tzmin'",
")",
"if",
"tzhour",
"is",
"not",
"None",
":",
"return",
"FixedOffset",
"(",
"int",
"(",
"tzhour",
")",
",",
"int",
"(",
"tzmin",
"or",
"0",
")",
")",
"return",
"tzinfo",
"or",
"FixedOffset",
"(",
"0",
",",
"0",
")",
"def",
"coerceGroups",
"(",
")",
":",
"groups",
"[",
"'month'",
"]",
"=",
"groups",
"[",
"'month1'",
"]",
"or",
"groups",
"[",
"'month2'",
"]",
"groups",
"[",
"'week'",
"]",
"=",
"groups",
"[",
"'week1'",
"]",
"or",
"groups",
"[",
"'week2'",
"]",
"# don't include fractional seconds, because it's not an integer.",
"defaultTo0",
"=",
"[",
"'hour'",
",",
"'minute'",
",",
"'second'",
"]",
"defaultTo1",
"=",
"[",
"'month'",
",",
"'day'",
",",
"'week'",
",",
"'weekday'",
",",
"'dayofyear'",
"]",
"if",
"groups",
"[",
"'fractionalsec'",
"]",
"is",
"None",
":",
"groups",
"[",
"'fractionalsec'",
"]",
"=",
"'0'",
"for",
"key",
"in",
"defaultTo0",
":",
"if",
"groups",
"[",
"key",
"]",
"is",
"None",
":",
"groups",
"[",
"key",
"]",
"=",
"0",
"for",
"key",
"in",
"defaultTo1",
":",
"if",
"groups",
"[",
"key",
"]",
"is",
"None",
":",
"groups",
"[",
"key",
"]",
"=",
"1",
"groups",
"[",
"'fractionalsec'",
"]",
"=",
"float",
"(",
"'.'",
"+",
"groups",
"[",
"'fractionalsec'",
"]",
")",
"for",
"key",
"in",
"defaultTo0",
"+",
"defaultTo1",
"+",
"[",
"'year'",
"]",
":",
"groups",
"[",
"key",
"]",
"=",
"int",
"(",
"groups",
"[",
"key",
"]",
")",
"for",
"group",
",",
"min",
",",
"max",
"in",
"[",
"# some years have only 52 weeks",
"(",
"'week'",
",",
"1",
",",
"53",
")",
",",
"(",
"'weekday'",
",",
"1",
",",
"7",
")",
",",
"(",
"'month'",
",",
"1",
",",
"12",
")",
",",
"(",
"'day'",
",",
"1",
",",
"31",
")",
",",
"(",
"'hour'",
",",
"0",
",",
"24",
")",
",",
"(",
"'minute'",
",",
"0",
",",
"59",
")",
",",
"# Sometime in the 22nd century AD, two leap seconds will be",
"# required every year. In the 25th century AD, four every",
"# year. We'll ignore that for now though because it would be",
"# tricky to get right and we certainly don't need it for our",
"# target applications. In other words, post-singularity",
"# Martian users, please do not rely on this code for",
"# compatibility with Greater Galactic Protectorate of Earth",
"# date/time formatting! Apologies, but no library I know of in",
"# Python is sufficient for processing their dates and times",
"# without ADA bindings to get the radiation-safety zone counter",
"# correct. -glyph",
"(",
"'second'",
",",
"0",
",",
"61",
")",
",",
"# don't forget leap years",
"(",
"'dayofyear'",
",",
"1",
",",
"366",
")",
"]",
":",
"if",
"not",
"min",
"<=",
"groups",
"[",
"group",
"]",
"<=",
"max",
":",
"raise",
"ValueError",
",",
"'%s must be in %i..%i'",
"%",
"(",
"group",
",",
"min",
",",
"max",
")",
"def",
"determineResolution",
"(",
")",
":",
"if",
"match",
".",
"group",
"(",
"'fractionalsec'",
")",
"is",
"not",
"None",
":",
"return",
"max",
"(",
"datetime",
".",
"timedelta",
".",
"resolution",
",",
"datetime",
".",
"timedelta",
"(",
"microseconds",
"=",
"1",
"*",
"10",
"**",
"-",
"len",
"(",
"match",
".",
"group",
"(",
"'fractionalsec'",
")",
")",
"*",
"1000000",
")",
")",
"for",
"testGroup",
",",
"resolution",
"in",
"[",
"(",
"'second'",
",",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"1",
")",
")",
",",
"(",
"'minute'",
",",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"1",
")",
")",
",",
"(",
"'hour'",
",",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"1",
")",
")",
",",
"(",
"'weekday'",
",",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
",",
"(",
"'dayofyear'",
",",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
",",
"(",
"'day'",
",",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
")",
",",
"(",
"'week1'",
",",
"datetime",
".",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
")",
",",
"(",
"'week2'",
",",
"datetime",
".",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
")",
"]",
":",
"if",
"match",
".",
"group",
"(",
"testGroup",
")",
"is",
"not",
"None",
":",
"return",
"resolution",
"if",
"match",
".",
"group",
"(",
"'month1'",
")",
"is",
"not",
"None",
"or",
"match",
".",
"group",
"(",
"'month2'",
")",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_time",
".",
"month",
"==",
"12",
":",
"return",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"31",
")",
"nextMonth",
"=",
"self",
".",
"_time",
".",
"replace",
"(",
"month",
"=",
"self",
".",
"_time",
".",
"month",
"+",
"1",
")",
"return",
"nextMonth",
"-",
"self",
".",
"_time",
"else",
":",
"nextYear",
"=",
"self",
".",
"_time",
".",
"replace",
"(",
"year",
"=",
"self",
".",
"_time",
".",
"year",
"+",
"1",
")",
"return",
"nextYear",
"-",
"self",
".",
"_time",
"def",
"calculateDtime",
"(",
"tzinfo",
")",
":",
"\"\"\"Calculate a datetime for the start of the addressed period.\"\"\"",
"if",
"match",
".",
"group",
"(",
"'week1'",
")",
"is",
"not",
"None",
"or",
"match",
".",
"group",
"(",
"'week2'",
")",
"is",
"not",
"None",
":",
"if",
"not",
"0",
"<",
"groups",
"[",
"'week'",
"]",
"<=",
"53",
":",
"raise",
"ValueError",
"(",
"'week must be in 1..53 (was %i)'",
"%",
"(",
"groups",
"[",
"'week'",
"]",
",",
")",
")",
"dtime",
"=",
"datetime",
".",
"datetime",
"(",
"groups",
"[",
"'year'",
"]",
",",
"1",
",",
"4",
",",
"groups",
"[",
"'hour'",
"]",
",",
"groups",
"[",
"'minute'",
"]",
",",
"groups",
"[",
"'second'",
"]",
",",
"int",
"(",
"round",
"(",
"groups",
"[",
"'fractionalsec'",
"]",
"*",
"1000000",
")",
")",
",",
"tzinfo",
"=",
"tzinfo",
")",
"dtime",
"-=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"dtime",
".",
"weekday",
"(",
")",
")",
"dtime",
"+=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"(",
"groups",
"[",
"'week'",
"]",
"-",
"1",
")",
"*",
"7",
"+",
"groups",
"[",
"'weekday'",
"]",
"-",
"1",
")",
"if",
"dtime",
".",
"isocalendar",
"(",
")",
"!=",
"(",
"groups",
"[",
"'year'",
"]",
",",
"groups",
"[",
"'week'",
"]",
",",
"groups",
"[",
"'weekday'",
"]",
")",
":",
"# actually the problem could be an error in my logic, but",
"# nothing should cause this but requesting week 53 of a",
"# year with 52 weeks.",
"raise",
"ValueError",
"(",
"'year %04i has no week %02i'",
"%",
"(",
"groups",
"[",
"'year'",
"]",
",",
"groups",
"[",
"'week'",
"]",
")",
")",
"return",
"dtime",
"if",
"match",
".",
"group",
"(",
"'dayofyear'",
")",
"is",
"not",
"None",
":",
"dtime",
"=",
"datetime",
".",
"datetime",
"(",
"groups",
"[",
"'year'",
"]",
",",
"1",
",",
"1",
",",
"groups",
"[",
"'hour'",
"]",
",",
"groups",
"[",
"'minute'",
"]",
",",
"groups",
"[",
"'second'",
"]",
",",
"int",
"(",
"round",
"(",
"groups",
"[",
"'fractionalsec'",
"]",
"*",
"1000000",
")",
")",
",",
"tzinfo",
"=",
"tzinfo",
")",
"dtime",
"+=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"groups",
"[",
"'dayofyear'",
"]",
"-",
"1",
")",
"if",
"dtime",
".",
"year",
"!=",
"groups",
"[",
"'year'",
"]",
":",
"raise",
"ValueError",
"(",
"'year %04i has no day of year %03i'",
"%",
"(",
"groups",
"[",
"'year'",
"]",
",",
"groups",
"[",
"'dayofyear'",
"]",
")",
")",
"return",
"dtime",
"else",
":",
"return",
"datetime",
".",
"datetime",
"(",
"groups",
"[",
"'year'",
"]",
",",
"groups",
"[",
"'month'",
"]",
",",
"groups",
"[",
"'day'",
"]",
",",
"groups",
"[",
"'hour'",
"]",
",",
"groups",
"[",
"'minute'",
"]",
",",
"groups",
"[",
"'second'",
"]",
",",
"int",
"(",
"round",
"(",
"groups",
"[",
"'fractionalsec'",
"]",
"*",
"1000000",
")",
")",
",",
"tzinfo",
"=",
"tzinfo",
")",
"match",
"=",
"klass",
".",
"iso8601pattern",
".",
"match",
"(",
"iso8601string",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'%r could not be parsed as an ISO 8601 date and time'",
"%",
"(",
"iso8601string",
",",
")",
")",
"groups",
"=",
"match",
".",
"groupdict",
"(",
")",
"coerceGroups",
"(",
")",
"if",
"match",
".",
"group",
"(",
"'hour'",
")",
"is",
"not",
"None",
":",
"timezone",
"=",
"calculateTimezone",
"(",
")",
"else",
":",
"timezone",
"=",
"None",
"self",
"=",
"klass",
".",
"fromDatetime",
"(",
"calculateDtime",
"(",
"timezone",
")",
")",
"self",
".",
"resolution",
"=",
"determineResolution",
"(",
")",
"return",
"self"
]
| Return a new Time instance from a string formatted as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work. | [
"Return",
"a",
"new",
"Time",
"instance",
"from",
"a",
"string",
"formated",
"as",
"in",
"ISO",
"8601",
"."
]
| python | train | 43.056818 |
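
For contrast, a sketch of what the Python standard library covers on its own: datetime.fromisoformat handles the common calendar-date forms, and strptime's %G/%V/%u directives handle the ISO week-date branch that the parser above implements by hand. Values are illustrative:

from datetime import datetime, timezone

# Calendar date with an explicit UTC offset (Python 3.7+).
dt = datetime.fromisoformat("2023-06-01T12:30:45+02:00")
print(dt.astimezone(timezone.utc))  # 2023-06-01 10:30:45+00:00

# ISO week date: ISO year 2004, week 53, weekday 6 (Saturday).
wk = datetime.strptime("2004-W53-6", "%G-W%V-%u")
print(wk.isocalendar())  # year 2004, week 53, weekday 6
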
jaraco/hgtools | hgtools/managers/cmd.py | https://github.com/jaraco/hgtools/blob/bf5fe2324e5ae15e012487f95f0c97c3775c5d2e/hgtools/managers/cmd.py#L93-L110 | def _read_tags_for_revset(self, spec):
"""
Return TaggedRevision for each tag/rev combination in the revset spec
"""
cmd = [
'log', '--style', 'default', '--config', 'defaults.log=',
'-r', spec]
res = self._invoke(*cmd)
header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
match_res = map(header_pattern.match, res.splitlines())
matched_lines = filter(None, match_res)
matches = (match.groupdict() for match in matched_lines)
for match in matches:
if match['header'] == 'changeset':
id, sep, rev = match['value'].partition(':')
if match['header'] == 'tag':
tag = match['value']
yield TaggedRevision(tag, rev) | [
"def",
"_read_tags_for_revset",
"(",
"self",
",",
"spec",
")",
":",
"cmd",
"=",
"[",
"'log'",
",",
"'--style'",
",",
"'default'",
",",
"'--config'",
",",
"'defaults.log='",
",",
"'-r'",
",",
"spec",
"]",
"res",
"=",
"self",
".",
"_invoke",
"(",
"*",
"cmd",
")",
"header_pattern",
"=",
"re",
".",
"compile",
"(",
"r'(?P<header>\\w+?):\\s+(?P<value>.*)'",
")",
"match_res",
"=",
"map",
"(",
"header_pattern",
".",
"match",
",",
"res",
".",
"splitlines",
"(",
")",
")",
"matched_lines",
"=",
"filter",
"(",
"None",
",",
"match_res",
")",
"matches",
"=",
"(",
"match",
".",
"groupdict",
"(",
")",
"for",
"match",
"in",
"matched_lines",
")",
"for",
"match",
"in",
"matches",
":",
"if",
"match",
"[",
"'header'",
"]",
"==",
"'changeset'",
":",
"id",
",",
"sep",
",",
"rev",
"=",
"match",
"[",
"'value'",
"]",
".",
"partition",
"(",
"':'",
")",
"if",
"match",
"[",
"'header'",
"]",
"==",
"'tag'",
":",
"tag",
"=",
"match",
"[",
"'value'",
"]",
"yield",
"TaggedRevision",
"(",
"tag",
",",
"rev",
")"
]
| Return TaggedRevision for each tag/rev combination in the revset spec | [
"Return",
"TaggedRevision",
"for",
"each",
"tag",
"/",
"rev",
"combination",
"in",
"the",
"revset",
"spec"
]
| python | train | 36 |
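
A self-contained sketch of the header-parsing regex above, run against made-up `hg log` output:

import re

sample = (
    "changeset:   12:0123abcd4567\n"
    "tag:         v1.0\n"
    "tag:         stable\n"
)
header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
for line in sample.splitlines():
    match = header_pattern.match(line)
    if match:
        print(match.groupdict())
# {'header': 'changeset', 'value': '12:0123abcd4567'}, then the two tag lines
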
saltstack/salt | salt/renderers/aws_kms.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/aws_kms.py#L179-L195 | def _plaintext_data_key():
'''
Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS.
'''
response = getattr(_plaintext_data_key, 'response', None)
cache_hit = response is not None
if not cache_hit:
response = _api_decrypt()
setattr(_plaintext_data_key, 'response', response)
key_id = response['KeyId']
plaintext = response['Plaintext']
if hasattr(plaintext, 'encode'):
plaintext = plaintext.encode(__salt_system_encoding__)
log.debug('Using key %s from %s', key_id, 'cache' if cache_hit else 'api call')
return plaintext | [
"def",
"_plaintext_data_key",
"(",
")",
":",
"response",
"=",
"getattr",
"(",
"_plaintext_data_key",
",",
"'response'",
",",
"None",
")",
"cache_hit",
"=",
"response",
"is",
"not",
"None",
"if",
"not",
"cache_hit",
":",
"response",
"=",
"_api_decrypt",
"(",
")",
"setattr",
"(",
"_plaintext_data_key",
",",
"'response'",
",",
"response",
")",
"key_id",
"=",
"response",
"[",
"'KeyId'",
"]",
"plaintext",
"=",
"response",
"[",
"'Plaintext'",
"]",
"if",
"hasattr",
"(",
"plaintext",
",",
"'encode'",
")",
":",
"plaintext",
"=",
"plaintext",
".",
"encode",
"(",
"__salt_system_encoding__",
")",
"log",
".",
"debug",
"(",
"'Using key %s from %s'",
",",
"key_id",
",",
"'cache'",
"if",
"cache_hit",
"else",
"'api call'",
")",
"return",
"plaintext"
]
| Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS. | [
"Return",
"the",
"configured",
"KMS",
"data",
"key",
"decrypted",
"and",
"encoded",
"in",
"urlsafe",
"base64",
"."
]
| python | train | 38 |
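
The function above caches its API response on the function object itself. A minimal sketch of that memoization pattern, with a stand-in for the AWS call:

def data_key():
    response = getattr(data_key, "response", None)
    if response is None:
        # Stand-in for the expensive _api_decrypt() network call.
        response = {"KeyId": "demo-key", "Plaintext": b"secret"}
        data_key.response = response  # cached for all later calls
    return response["Plaintext"]

print(data_key())  # performs the "API call"
print(data_key())  # served from the function attribute
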
wmayner/pyphi | pyphi/subsystem.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/subsystem.py#L247-L257 | def apply_cut(self, cut):
"""Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem.
"""
return Subsystem(self.network, self.state, self.node_indices,
cut=cut, mice_cache=self._mice_cache) | [
"def",
"apply_cut",
"(",
"self",
",",
"cut",
")",
":",
"return",
"Subsystem",
"(",
"self",
".",
"network",
",",
"self",
".",
"state",
",",
"self",
".",
"node_indices",
",",
"cut",
"=",
"cut",
",",
"mice_cache",
"=",
"self",
".",
"_mice_cache",
")"
]
| Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem. | [
"Return",
"a",
"cut",
"version",
"of",
"this",
"|Subsystem|",
"."
]
| python | train | 31.727273 |
kragniz/python-etcd3 | etcd3/events.py | https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/events.py#L26-L41 | def new_event(event):
"""
Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance.
"""
op_name = event.EventType.DESCRIPTOR.values_by_number[event.type].name
if op_name == 'PUT':
cls = PutEvent
elif op_name == 'DELETE':
cls = DeleteEvent
else:
raise Exception('Invalid op_name')
return cls(event) | [
"def",
"new_event",
"(",
"event",
")",
":",
"op_name",
"=",
"event",
".",
"EventType",
".",
"DESCRIPTOR",
".",
"values_by_number",
"[",
"event",
".",
"type",
"]",
".",
"name",
"if",
"op_name",
"==",
"'PUT'",
":",
"cls",
"=",
"PutEvent",
"elif",
"op_name",
"==",
"'DELETE'",
":",
"cls",
"=",
"DeleteEvent",
"else",
":",
"raise",
"Exception",
"(",
"'Invalid op_name'",
")",
"return",
"cls",
"(",
"event",
")"
]
| Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance. | [
"Wrap",
"a",
"raw",
"gRPC",
"event",
"in",
"a",
"friendlier",
"containing",
"class",
"."
]
| python | train | 27.6875 |
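
A hedged usage sketch: assuming the documented python-etcd3 watch API and a reachable local etcd server, watch events arrive as the PutEvent/DeleteEvent instances produced above:

import etcd3
from etcd3.events import DeleteEvent, PutEvent

client = etcd3.client()  # assumes etcd on localhost:2379
events, cancel = client.watch("mykey")
for event in events:
    kind = "PUT" if isinstance(event, PutEvent) else "DELETE"
    print(kind, event.key, event.value)
    cancel()
    break
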
pandas-dev/pandas | pandas/core/indexes/multi.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L49-L77 | def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
        -------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1) | [
"def",
"_codes_to_ints",
"(",
"self",
",",
"codes",
")",
":",
"# Shift the representation of each level by the pre-calculated number",
"# of bits:",
"codes",
"<<=",
"self",
".",
"offsets",
"# Now sum and OR are in fact interchangeable. This is a simple",
"# composition of the (disjunct) significant bits of each level (i.e.",
"# each column in \"codes\") in a single positive integer:",
"if",
"codes",
".",
"ndim",
"==",
"1",
":",
"# Single key",
"return",
"np",
".",
"bitwise_or",
".",
"reduce",
"(",
"codes",
")",
"# Multiple keys",
"return",
"np",
".",
"bitwise_or",
".",
"reduce",
"(",
"codes",
",",
"axis",
"=",
"1",
")"
]
| Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each). | [
"Transform",
"combination",
"(",
"s",
")",
"of",
"uint64",
"in",
"one",
"uint64",
"(",
"each",
")",
"in",
"a",
"strictly",
"monotonic",
"way",
"(",
"i",
".",
"e",
".",
"respecting",
"the",
"lexicographic",
"order",
"of",
"integer",
"combinations",
")",
":",
"see",
"BaseMultiIndexCodesEngine",
"documentation",
"."
]
| python | train | 36.965517 |
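
A numpy-only sketch of the bit-packing idea above: shift each level's codes into disjoint bit ranges, then OR the columns together. The 8-bit offsets are illustrative, not pandas' actual values:

import numpy as np

codes = np.array([[1, 2], [3, 0]], dtype=np.uint64)
offsets = np.array([8, 0], dtype=np.uint64)  # level 0 shifted past level 1's bits
packed = np.bitwise_or.reduce(codes << offsets, axis=1)
print(packed)  # [258 768]  i.e. 1*256 + 2 and 3*256 + 0
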
vtkiorg/vtki | vtki/common.py | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L536-L541 | def copy_meta_from(self, ido):
"""Copies vtki meta data onto this object from another object"""
self._active_scalar_info = ido.active_scalar_info
self._active_vectors_info = ido.active_vectors_info
if hasattr(ido, '_textures'):
self._textures = ido._textures | [
"def",
"copy_meta_from",
"(",
"self",
",",
"ido",
")",
":",
"self",
".",
"_active_scalar_info",
"=",
"ido",
".",
"active_scalar_info",
"self",
".",
"_active_vectors_info",
"=",
"ido",
".",
"active_vectors_info",
"if",
"hasattr",
"(",
"ido",
",",
"'_textures'",
")",
":",
"self",
".",
"_textures",
"=",
"ido",
".",
"_textures"
]
| Copies vtki meta data onto this object from another object | [
"Copies",
"vtki",
"meta",
"data",
"onto",
"this",
"object",
"from",
"another",
"object"
]
| python | train | 49.5 |
Azure/azure-cosmos-python | azure/cosmos/base.py | https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/base.py#L217-L252 | def GetResourceIdOrFullNameFromLink(resource_link):
"""Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str
"""
# For named based, the resource link is the full name
if IsNameBased(resource_link):
return TrimBeginningAndEndingSlashes(resource_link)
# Padding the resource link with leading and trailing slashes if not already
if resource_link[-1] != '/':
resource_link = resource_link + '/'
if resource_link[0] != '/':
resource_link = '/' + resource_link
# The path will be in the form of
# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/ or
# /[resourceType]/[resourceId]/ .... /[resourceType]/
# The result of split will be in the form of
# ["", [resourceType], [resourceId] ... ,[resourceType], [resourceId], ""]
    # In the first case, to extract the resourceId it will be the element
    # before last ( at length -2 ) and the type will be before it
# ( at length -3 )
    # In the second case, to extract the resource type it will be the element
# before last ( at length -2 )
path_parts = resource_link.split("/")
if len(path_parts) % 2 == 0:
# request in form
# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/.
return str(path_parts[-2])
return None | [
"def",
"GetResourceIdOrFullNameFromLink",
"(",
"resource_link",
")",
":",
"# For named based, the resource link is the full name",
"if",
"IsNameBased",
"(",
"resource_link",
")",
":",
"return",
"TrimBeginningAndEndingSlashes",
"(",
"resource_link",
")",
"# Padding the resource link with leading and trailing slashes if not already",
"if",
"resource_link",
"[",
"-",
"1",
"]",
"!=",
"'/'",
":",
"resource_link",
"=",
"resource_link",
"+",
"'/'",
"if",
"resource_link",
"[",
"0",
"]",
"!=",
"'/'",
":",
"resource_link",
"=",
"'/'",
"+",
"resource_link",
"# The path will be in the form of ",
"# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/ or",
"# /[resourceType]/[resourceId]/ .... /[resourceType]/",
"# The result of split will be in the form of",
"# [\"\", [resourceType], [resourceId] ... ,[resourceType], [resourceId], \"\"]",
"# In the first case, to extract the resourceId it will the element",
"# before last ( at length -2 ) and the the type will before it",
"# ( at length -3 )",
"# In the second case, to extract the resource type it will the element",
"# before last ( at length -2 )",
"path_parts",
"=",
"resource_link",
".",
"split",
"(",
"\"/\"",
")",
"if",
"len",
"(",
"path_parts",
")",
"%",
"2",
"==",
"0",
":",
"# request in form",
"# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/.",
"return",
"str",
"(",
"path_parts",
"[",
"-",
"2",
"]",
")",
"return",
"None"
]
| Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str | [
"Gets",
"resource",
"id",
"or",
"full",
"name",
"from",
"resource",
"link",
"."
]
| python | train | 38.75 |
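
A sketch of the parity trick above on a made-up link, ignoring the name-based branch: once padded and split, an even-length parts list means the link ends in a resource id:

resource_link = "/dbs/mydb/colls/mycoll"
if resource_link[-1] != '/':
    resource_link += '/'
if resource_link[0] != '/':
    resource_link = '/' + resource_link
parts = resource_link.split("/")
print(parts)  # ['', 'dbs', 'mydb', 'colls', 'mycoll', '']
print(parts[-2] if len(parts) % 2 == 0 else None)  # 'mycoll'
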
gwastro/pycbc-glue | pycbc_glue/pipeline.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L2501-L2511 | def set_end(self,time,pass_to_command_line=True):
"""
Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@bool pass_to_command_line: add gps-end-time as variable option.
"""
if pass_to_command_line:
self.add_var_opt('gps-end-time',time)
self.__end = time
self.__data_end = time | [
"def",
"set_end",
"(",
"self",
",",
"time",
",",
"pass_to_command_line",
"=",
"True",
")",
":",
"if",
"pass_to_command_line",
":",
"self",
".",
"add_var_opt",
"(",
"'gps-end-time'",
",",
"time",
")",
"self",
".",
"__end",
"=",
"time",
"self",
".",
"__data_end",
"=",
"time"
]
| Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@bool pass_to_command_line: add gps-end-time as variable option. | [
"Set",
"the",
"GPS",
"end",
"time",
"of",
"the",
"analysis",
"node",
"by",
"setting",
"a",
"--",
"gps",
"-",
"end",
"-",
"time",
"option",
"to",
"the",
"node",
"when",
"it",
"is",
"executed",
"."
]
| python | train | 36.545455 |
rupertford/melody | src/melody/inputs.py | https://github.com/rupertford/melody/blob/d50459880a87fdd1802c6893f6e12b52d51b3b91/src/melody/inputs.py#L155-L180 | def create_input(option, template_name, template_location="template"):
'''create an input file using jinja2 by filling a template
with the values from the option variable passed in.'''
# restructure option list into jinja2 input format
jinja2_input = {}
for item in option:
try:
jinja2_input.update(item)
except ValueError:
raise RuntimeError(
("inputs.py, create_input : format of item '{0}' is not "
"supported. Expecting a dictionary.".format(str(item))))
# load the template and fill it with the option variable contents
import jinja2
try:
template_loader = jinja2.FileSystemLoader(searchpath=template_location)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_name)
output_text = template.render(jinja2_input)
except jinja2.TemplateNotFound:
raise RuntimeError("template '{0}' not found".format(template_name))
# return the particular input file as a string
return output_text | [
"def",
"create_input",
"(",
"option",
",",
"template_name",
",",
"template_location",
"=",
"\"template\"",
")",
":",
"# restructure option list into jinja2 input format",
"jinja2_input",
"=",
"{",
"}",
"for",
"item",
"in",
"option",
":",
"try",
":",
"jinja2_input",
".",
"update",
"(",
"item",
")",
"except",
"ValueError",
":",
"raise",
"RuntimeError",
"(",
"(",
"\"inputs.py, create_input : format of item '{0}' is not \"",
"\"supported. Expecting a dictionary.\"",
".",
"format",
"(",
"str",
"(",
"item",
")",
")",
")",
")",
"# load the template and fill it with the option variable contents",
"import",
"jinja2",
"try",
":",
"template_loader",
"=",
"jinja2",
".",
"FileSystemLoader",
"(",
"searchpath",
"=",
"template_location",
")",
"template_env",
"=",
"jinja2",
".",
"Environment",
"(",
"loader",
"=",
"template_loader",
")",
"template",
"=",
"template_env",
".",
"get_template",
"(",
"template_name",
")",
"output_text",
"=",
"template",
".",
"render",
"(",
"jinja2_input",
")",
"except",
"jinja2",
".",
"TemplateNotFound",
":",
"raise",
"RuntimeError",
"(",
"\"template '{0}' not found\"",
".",
"format",
"(",
"template_name",
")",
")",
"# return the particular input file as a string",
"return",
"output_text"
]
| create an input file using jinja2 by filling a template
with the values from the option variable passed in. | [
"create",
"an",
"input",
"file",
"using",
"jinja2",
"by",
"filling",
"a",
"template",
"with",
"the",
"values",
"from",
"the",
"option",
"variable",
"passed",
"in",
"."
]
| python | test | 41.192308 |
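
A sketch of the same jinja2 fill-in step using an inline template string instead of a template directory (requires jinja2 installed; the option names and values are illustrative):

import jinja2

template = jinja2.Template("timestep = {{ dt }}\nsolver = {{ solver }}\n")
print(template.render({"dt": 0.01, "solver": "cg"}))
# timestep = 0.01
# solver = cg
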
OSSOS/MOP | src/ossos/core/ossos/storage.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L629-L701 | def _cutout_expnum(observation, sky_coord, radius):
"""
Get a cutout from an exposure based on the RA/DEC location.
    @param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList)
"""
uri = observation.get_image_uri()
cutout_filehandle = tempfile.NamedTemporaryFile()
disposition_filename = client.copy(uri + "({},{},{})".format(sky_coord.ra.to('degree').value,
sky_coord.dec.to('degree').value,
radius.to('degree').value),
cutout_filehandle.name,
disposition=True)
cutouts = decompose_content_decomposition(disposition_filename)
cutout_filehandle.seek(0)
hdulist = fits.open(cutout_filehandle)
hdulist.verify('silentfix+ignore')
logger.debug("Initial Length of HDUList: {}".format(len(hdulist)))
# Make sure here is a primaryHDU
if len(hdulist) == 1:
phdu = fits.PrimaryHDU()
phdu.header['ORIGIN'] = "OSSOS"
hdulist.insert(0, phdu)
logger.debug("Final Length of HDUList: {}".format(len(hdulist)))
if len(cutouts) != len(hdulist) - 1:
raise ValueError("Wrong number of cutout structures found in Content-Disposition response.")
for hdu in hdulist[1:]:
cutout = cutouts.pop(0)
if 'ASTLEVEL' not in hdu.header:
print("WARNING: ******* NO ASTLEVEL KEYWORD ********** for {0} ********".format(observation.get_image_uri))
hdu.header['ASTLEVEL'] = 0
hdu.header['EXTNO'] = cutout[0]
naxis1 = hdu.header['NAXIS1']
naxis2 = hdu.header['NAXIS2']
default_datasec = "[{}:{},{}:{}]".format(1, naxis1, 1, naxis2)
datasec = hdu.header.get('DATASEC', default_datasec)
datasec = datasec_to_list(datasec)
corners = datasec
for idx in range(len(corners)):
try:
corners[idx] = int(cutout[idx+1])
except Exception:
pass
hdu.header['DATASEC'] = reset_datasec("[{}:{},{}:{}]".format(corners[0],
corners[1],
corners[2],
corners[3]),
hdu.header.get('DATASEC', default_datasec),
hdu.header['NAXIS1'],
hdu.header['NAXIS2'])
hdu.header['XOFFSET'] = int(corners[0]) - 1
hdu.header['YOFFSET'] = int(corners[2]) - 1
hdu.converter = CoordinateConverter(hdu.header['XOFFSET'], hdu.header['YOFFSET'])
try:
hdu.wcs = WCS(hdu.header)
except Exception as ex:
logger.error("Failed trying to initialize the WCS for {}".format(uri))
raise ex
logger.debug("Sending back {}".format(hdulist))
return hdulist | [
"def",
"_cutout_expnum",
"(",
"observation",
",",
"sky_coord",
",",
"radius",
")",
":",
"uri",
"=",
"observation",
".",
"get_image_uri",
"(",
")",
"cutout_filehandle",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"disposition_filename",
"=",
"client",
".",
"copy",
"(",
"uri",
"+",
"\"({},{},{})\"",
".",
"format",
"(",
"sky_coord",
".",
"ra",
".",
"to",
"(",
"'degree'",
")",
".",
"value",
",",
"sky_coord",
".",
"dec",
".",
"to",
"(",
"'degree'",
")",
".",
"value",
",",
"radius",
".",
"to",
"(",
"'degree'",
")",
".",
"value",
")",
",",
"cutout_filehandle",
".",
"name",
",",
"disposition",
"=",
"True",
")",
"cutouts",
"=",
"decompose_content_decomposition",
"(",
"disposition_filename",
")",
"cutout_filehandle",
".",
"seek",
"(",
"0",
")",
"hdulist",
"=",
"fits",
".",
"open",
"(",
"cutout_filehandle",
")",
"hdulist",
".",
"verify",
"(",
"'silentfix+ignore'",
")",
"logger",
".",
"debug",
"(",
"\"Initial Length of HDUList: {}\"",
".",
"format",
"(",
"len",
"(",
"hdulist",
")",
")",
")",
"# Make sure here is a primaryHDU",
"if",
"len",
"(",
"hdulist",
")",
"==",
"1",
":",
"phdu",
"=",
"fits",
".",
"PrimaryHDU",
"(",
")",
"phdu",
".",
"header",
"[",
"'ORIGIN'",
"]",
"=",
"\"OSSOS\"",
"hdulist",
".",
"insert",
"(",
"0",
",",
"phdu",
")",
"logger",
".",
"debug",
"(",
"\"Final Length of HDUList: {}\"",
".",
"format",
"(",
"len",
"(",
"hdulist",
")",
")",
")",
"if",
"len",
"(",
"cutouts",
")",
"!=",
"len",
"(",
"hdulist",
")",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Wrong number of cutout structures found in Content-Disposition response.\"",
")",
"for",
"hdu",
"in",
"hdulist",
"[",
"1",
":",
"]",
":",
"cutout",
"=",
"cutouts",
".",
"pop",
"(",
"0",
")",
"if",
"'ASTLEVEL'",
"not",
"in",
"hdu",
".",
"header",
":",
"print",
"(",
"\"WARNING: ******* NO ASTLEVEL KEYWORD ********** for {0} ********\"",
".",
"format",
"(",
"observation",
".",
"get_image_uri",
")",
")",
"hdu",
".",
"header",
"[",
"'ASTLEVEL'",
"]",
"=",
"0",
"hdu",
".",
"header",
"[",
"'EXTNO'",
"]",
"=",
"cutout",
"[",
"0",
"]",
"naxis1",
"=",
"hdu",
".",
"header",
"[",
"'NAXIS1'",
"]",
"naxis2",
"=",
"hdu",
".",
"header",
"[",
"'NAXIS2'",
"]",
"default_datasec",
"=",
"\"[{}:{},{}:{}]\"",
".",
"format",
"(",
"1",
",",
"naxis1",
",",
"1",
",",
"naxis2",
")",
"datasec",
"=",
"hdu",
".",
"header",
".",
"get",
"(",
"'DATASEC'",
",",
"default_datasec",
")",
"datasec",
"=",
"datasec_to_list",
"(",
"datasec",
")",
"corners",
"=",
"datasec",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"corners",
")",
")",
":",
"try",
":",
"corners",
"[",
"idx",
"]",
"=",
"int",
"(",
"cutout",
"[",
"idx",
"+",
"1",
"]",
")",
"except",
"Exception",
":",
"pass",
"hdu",
".",
"header",
"[",
"'DATASEC'",
"]",
"=",
"reset_datasec",
"(",
"\"[{}:{},{}:{}]\"",
".",
"format",
"(",
"corners",
"[",
"0",
"]",
",",
"corners",
"[",
"1",
"]",
",",
"corners",
"[",
"2",
"]",
",",
"corners",
"[",
"3",
"]",
")",
",",
"hdu",
".",
"header",
".",
"get",
"(",
"'DATASEC'",
",",
"default_datasec",
")",
",",
"hdu",
".",
"header",
"[",
"'NAXIS1'",
"]",
",",
"hdu",
".",
"header",
"[",
"'NAXIS2'",
"]",
")",
"hdu",
".",
"header",
"[",
"'XOFFSET'",
"]",
"=",
"int",
"(",
"corners",
"[",
"0",
"]",
")",
"-",
"1",
"hdu",
".",
"header",
"[",
"'YOFFSET'",
"]",
"=",
"int",
"(",
"corners",
"[",
"2",
"]",
")",
"-",
"1",
"hdu",
".",
"converter",
"=",
"CoordinateConverter",
"(",
"hdu",
".",
"header",
"[",
"'XOFFSET'",
"]",
",",
"hdu",
".",
"header",
"[",
"'YOFFSET'",
"]",
")",
"try",
":",
"hdu",
".",
"wcs",
"=",
"WCS",
"(",
"hdu",
".",
"header",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"\"Failed trying to initialize the WCS for {}\"",
".",
"format",
"(",
"uri",
")",
")",
"raise",
"ex",
"logger",
".",
"debug",
"(",
"\"Sending back {}\"",
".",
"format",
"(",
"hdulist",
")",
")",
"return",
"hdulist"
]
| Get a cutout from an exposure based on the RA/DEC location.
@param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList) | [
"Get",
"a",
"cutout",
"from",
"an",
"exposure",
"based",
"on",
"the",
"RA",
"/",
"DEC",
"location",
"."
]
| python | train | 44.753425 |
saltstack/salt | salt/crypt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/crypt.py#L383-L407 | def __get_keys(self, name='master', passphrase=None):
'''
Returns a key object for a key in the pki-dir
'''
path = os.path.join(self.opts['pki_dir'],
name + '.pem')
if not os.path.exists(path):
log.info('Generating %s keys: %s', name, self.opts['pki_dir'])
gen_keys(self.opts['pki_dir'],
name,
self.opts['keysize'],
self.opts.get('user'),
passphrase)
if HAS_M2:
key_error = RSA.RSAError
else:
key_error = ValueError
try:
key = get_rsa_key(path, passphrase)
except key_error as e:
message = 'Unable to read key: {0}; passphrase may be incorrect'.format(path)
log.error(message)
raise MasterExit(message)
log.debug('Loaded %s key: %s', name, path)
return key | [
"def",
"__get_keys",
"(",
"self",
",",
"name",
"=",
"'master'",
",",
"passphrase",
"=",
"None",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"opts",
"[",
"'pki_dir'",
"]",
",",
"name",
"+",
"'.pem'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"log",
".",
"info",
"(",
"'Generating %s keys: %s'",
",",
"name",
",",
"self",
".",
"opts",
"[",
"'pki_dir'",
"]",
")",
"gen_keys",
"(",
"self",
".",
"opts",
"[",
"'pki_dir'",
"]",
",",
"name",
",",
"self",
".",
"opts",
"[",
"'keysize'",
"]",
",",
"self",
".",
"opts",
".",
"get",
"(",
"'user'",
")",
",",
"passphrase",
")",
"if",
"HAS_M2",
":",
"key_error",
"=",
"RSA",
".",
"RSAError",
"else",
":",
"key_error",
"=",
"ValueError",
"try",
":",
"key",
"=",
"get_rsa_key",
"(",
"path",
",",
"passphrase",
")",
"except",
"key_error",
"as",
"e",
":",
"message",
"=",
"'Unable to read key: {0}; passphrase may be incorrect'",
".",
"format",
"(",
"path",
")",
"log",
".",
"error",
"(",
"message",
")",
"raise",
"MasterExit",
"(",
"message",
")",
"log",
".",
"debug",
"(",
"'Loaded %s key: %s'",
",",
"name",
",",
"path",
")",
"return",
"key"
]
| Returns a key object for a key in the pki-dir | [
"Returns",
"a",
"key",
"object",
"for",
"a",
"key",
"in",
"the",
"pki",
"-",
"dir"
]
| python | train | 37.12 |
rigetti/quantumflow | quantumflow/gates.py | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/gates.py#L110-L113 | def almost_hermitian(gate: Gate) -> bool:
"""Return true if gate tensor is (almost) Hermitian"""
return np.allclose(asarray(gate.asoperator()),
asarray(gate.H.asoperator())) | [
"def",
"almost_hermitian",
"(",
"gate",
":",
"Gate",
")",
"->",
"bool",
":",
"return",
"np",
".",
"allclose",
"(",
"asarray",
"(",
"gate",
".",
"asoperator",
"(",
")",
")",
",",
"asarray",
"(",
"gate",
".",
"H",
".",
"asoperator",
"(",
")",
")",
")"
]
| Return true if gate tensor is (almost) Hermitian | [
"Return",
"true",
"if",
"gate",
"tensor",
"is",
"(",
"almost",
")",
"Hermitian"
]
| python | train | 50.25 |
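
The check above reduces to comparing an operator with its conjugate transpose. A numpy-only sketch with a small Hermitian matrix:

import numpy as np

A = np.array([[1.0, 1j],
              [-1j, 2.0]])
print(np.allclose(A, A.conj().T))  # True: A equals its conjugate transpose
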
bwohlberg/sporco | sporco/cupy/_cp_util.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cupy/_cp_util.py#L40-L64 | def cupy_wrapper(func):
"""A wrapper function that converts numpy ndarray arguments to cupy
    arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
args = list(args)
for n, a in enumerate(args):
if isinstance(a, np.ndarray):
args[n] = cp.asarray(a)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = cp.asarray(v)
rtn = func(*args, **kwargs)
if isinstance(rtn, (list, tuple)):
for n, a in enumerate(rtn):
if isinstance(a, cp.core.core.ndarray):
rtn[n] = cp.asnumpy(a)
else:
if isinstance(rtn, cp.core.core.ndarray):
rtn = cp.asnumpy(rtn)
return rtn
return wrapped | [
"def",
"cupy_wrapper",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"list",
"(",
"args",
")",
"for",
"n",
",",
"a",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
":",
"args",
"[",
"n",
"]",
"=",
"cp",
".",
"asarray",
"(",
"a",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"np",
".",
"ndarray",
")",
":",
"kwargs",
"[",
"k",
"]",
"=",
"cp",
".",
"asarray",
"(",
"v",
")",
"rtn",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"rtn",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"n",
",",
"a",
"in",
"enumerate",
"(",
"rtn",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"cp",
".",
"core",
".",
"core",
".",
"ndarray",
")",
":",
"rtn",
"[",
"n",
"]",
"=",
"cp",
".",
"asnumpy",
"(",
"a",
")",
"else",
":",
"if",
"isinstance",
"(",
"rtn",
",",
"cp",
".",
"core",
".",
"core",
".",
"ndarray",
")",
":",
"rtn",
"=",
"cp",
".",
"asnumpy",
"(",
"rtn",
")",
"return",
"rtn",
"return",
"wrapped"
]
| A wrapper function that converts numpy ndarray arguments to cupy
    arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays. | [
"A",
"wrapper",
"function",
"that",
"converts",
"numpy",
"ndarray",
"arguments",
"to",
"cupy",
"arrays",
"and",
"convert",
"any",
"cupy",
"arrays",
"returned",
"by",
"the",
"wrapped",
"function",
"into",
"numpy",
"ndarrays",
"."
]
| python | train | 34.64 |
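Hypothetical usage of the decorator above, assuming cupy is installed: callers pass and receive plain numpy arrays while the body runs on cupy arrays. Note the list/tuple branch assigns back into rtn, which only succeeds when the wrapped function returns a mutable list.

import numpy as np

@cupy_wrapper
def scaled_sum(a, b, scale=2.0):
    # a and b arrive here converted to cupy arrays.
    return scale * (a + b)

x = np.arange(4.0)
y = np.ones(4)
print(scaled_sum(x, y))  # back to a numpy array: [2. 4. 6. 8.]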
mikeboers/MultiMap | multimap.py | https://github.com/mikeboers/MultiMap/blob/0251e5d5df693cc247b4ac5b95adfdd10e3bec04/multimap.py#L492-L521 | def setall(self, key, values):
"""Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
        removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)]
"""
key = self._conform_key(key)
values = [self._conform_value(x) for x in values]
ids = self._key_ids[key][:]
while ids and values:
id = ids.pop(0)
value = values.pop(0)
self._pairs[id] = (key, value)
if ids:
self._key_ids[key] = self._key_ids[key][:-len(ids)]
self._remove_pairs(ids)
for value in values:
self._key_ids[key].append(len(self._pairs))
self._pairs.append((key, value)) | [
"def",
"setall",
"(",
"self",
",",
"key",
",",
"values",
")",
":",
"key",
"=",
"self",
".",
"_conform_key",
"(",
"key",
")",
"values",
"=",
"[",
"self",
".",
"_conform_value",
"(",
"x",
")",
"for",
"x",
"in",
"values",
"]",
"ids",
"=",
"self",
".",
"_key_ids",
"[",
"key",
"]",
"[",
":",
"]",
"while",
"ids",
"and",
"values",
":",
"id",
"=",
"ids",
".",
"pop",
"(",
"0",
")",
"value",
"=",
"values",
".",
"pop",
"(",
"0",
")",
"self",
".",
"_pairs",
"[",
"id",
"]",
"=",
"(",
"key",
",",
"value",
")",
"if",
"ids",
":",
"self",
".",
"_key_ids",
"[",
"key",
"]",
"=",
"self",
".",
"_key_ids",
"[",
"key",
"]",
"[",
":",
"-",
"len",
"(",
"ids",
")",
"]",
"self",
".",
"_remove_pairs",
"(",
"ids",
")",
"for",
"value",
"in",
"values",
":",
"self",
".",
"_key_ids",
"[",
"key",
"]",
".",
"append",
"(",
"len",
"(",
"self",
".",
"_pairs",
")",
")",
"self",
".",
"_pairs",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")"
]
| Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
        removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)] | [
"Set",
"more",
"than",
"one",
"value",
"for",
"a",
"given",
"key",
".",
"Replaces",
"all",
"the",
"existing",
"values",
"for",
"the",
"given",
"key",
"with",
"new",
"values",
"removes",
"extra",
"values",
"that",
"are",
"already",
"set",
"if",
"we",
"don",
"t",
"suply",
"enough",
"and",
"appends",
"values",
"to",
"the",
"end",
"if",
"there",
"are",
"not",
"enough",
"existing",
"spots",
".",
">>>",
"m",
"=",
"MutableMultiMap",
"(",
"a",
"=",
"1",
"b",
"=",
"2",
"c",
"=",
"3",
")",
">>>",
"m",
".",
"sort",
"()",
">>>",
"m",
".",
"keys",
"()",
"[",
"a",
"b",
"c",
"]",
">>>",
"m",
".",
"append",
"((",
"b",
"4",
"))",
">>>",
"m",
".",
"setall",
"(",
"b",
"[",
"5",
"6",
"7",
"]",
")",
">>>",
"m",
".",
"allitems",
"()",
"[",
"(",
"a",
"1",
")",
"(",
"b",
"5",
")",
"(",
"c",
"3",
")",
"(",
"b",
"6",
")",
"(",
"b",
"7",
")",
"]"
]
| python | train | 36.733333 |
kobejohn/PQHelper | pqhelper/ui.py | https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/ui.py#L219-L223 | def _next(self):
"""Get the next summary and present it."""
self.summaries.rotate(-1)
current_summary = self.summaries[0]
self._update_summary(current_summary) | [
"def",
"_next",
"(",
"self",
")",
":",
"self",
".",
"summaries",
".",
"rotate",
"(",
"-",
"1",
")",
"current_summary",
"=",
"self",
".",
"summaries",
"[",
"0",
"]",
"self",
".",
"_update_summary",
"(",
"current_summary",
")"
]
| Get the next summary and present it. | [
"Get",
"the",
"next",
"summary",
"and",
"present",
"it",
"."
]
| python | train | 37.4 |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3105-L3110 | def addContentLen(self, content, len):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContentLen(self._o, content, len) | [
"def",
"addContentLen",
"(",
"self",
",",
"content",
",",
"len",
")",
":",
"libxml2mod",
".",
"xmlNodeAddContentLen",
"(",
"self",
".",
"_o",
",",
"content",
",",
"len",
")"
]
| Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. | [
"Append",
"the",
"extra",
"substring",
"to",
"the",
"node",
"content",
".",
"NOTE",
":",
"In",
"contrast",
"to",
"xmlNodeSetContentLen",
"()"
]
| python | train | 58.833333 |
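A hedged usage sketch with the libxml2 Python bindings; per the note above, the appended content is treated as raw text:

import libxml2

doc = libxml2.parseDoc('<root>hello</root>')
node = doc.getRootElement()
extra = ' world'
node.addContentLen(extra, len(extra))  # append raw text to the node content
print(doc.serialize())                 # ...<root>hello world</root>
doc.freeDoc()                          # libxml2 objects need explicit cleanup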
toejough/pimento | pimento/__init__.py | https://github.com/toejough/pimento/blob/cdb00a93976733aa5521f8504152cedeedfc711a/pimento/__init__.py#L418-L525 | def _cli():
'''CLI interface'''
parser = _argparse.ArgumentParser(
description='''
Present the user with a simple CLI menu, and return the option chosen.
The menu is presented via stderr.
The output is printed to stdout for piping.
'''.format(_VERSION),
epilog='''
The default for the post prompt is "Enter an option to continue: ".
If --default-index is specified, the default option value will be printed
in the post prompt as well.
'''
)
parser.add_argument(
'option',
help='The option(s) to present to the user.',
nargs='*'
)
parser.add_argument(
'--version', '-v',
help='Print the version and then exit',
action='store_true'
)
parser.add_argument(
'--pre', '-p',
help='The pre-prompt/title/introduction to the menu. [%(default)s]',
default='Options:',
metavar='TEXT'
)
parser.add_argument(
'--post', '-P',
help='The prompt presented to the user after the menu items.',
default=_NO_ARG,
metavar='TEXT'
)
parser.add_argument(
'--default-index', '-d',
help='The index of the item to use as the default',
type=int,
metavar='INT'
)
parser.add_argument(
'--indexed', '-i',
help='Print indices with the options, and allow the user to use them to choose.',
action='store_true'
)
parser.add_argument(
'--insensitive', '-I',
help=(
'Perform insensitive matching. Also drops any items that case-insensitively match'
+ ' prior items.'
),
action='store_true'
)
parser.add_argument(
'--fuzzy', '-f',
help='search for the individual words in the user input anywhere in the item strings.',
action='store_true'
)
parser.add_argument(
'--stdout',
help='Use stdout for interactive output (instead of the default: stderr).',
action='store_true'
)
# parse options
args = parser.parse_args()
# argparse nargs is awkward. Translate to be a proper plural.
options = args.option
# set the stream
stream = _sys.stdout if args.stdout else _sys.stderr
# if version, print version and exit
if args.version:
stream.write('Pimento - v{}\n'.format(_VERSION))
exit(0)
    # read more options from stdin if there are any
# but only if we're on a 'nix system with tty's
tty = '/dev/tty'
if not _sys.stdin.isatty() and _path.exists(tty):
if _sys.version_info.major == 3:
stream.write('[!] python3 input bug - tab completion not available\n')
stream.write('[!] python3 input bug - arrow support not available\n')
stream.write('[!] only known workaround is to not pipe in.\n')
options += [l.rstrip() for l in _sys.stdin]
# switch to the main tty
# this solution (to being interactive after reading from pipe)
# comes from: https://stackoverflow.com/questions/6312819/pipes-and-prompts-in-python-cli-scripts
_sys.stdin = open(tty)
# show the menu
try:
result = menu(
options,
pre_prompt=args.pre,
post_prompt=args.post,
default_index=args.default_index,
indexed=args.indexed,
insensitive=args.insensitive,
fuzzy=args.fuzzy,
stream=stream
)
# print the result (to stdout)
_sys.stdout.write(result + '\n')
except KeyboardInterrupt:
_sys.stderr.write("\nCTRL-C detected. Exiting.\n")
_sys.stderr.flush()
except Exception as e:
_sys.stdout.write("ERROR: {}\n".format(e))
exit(1) | [
"def",
"_cli",
"(",
")",
":",
"parser",
"=",
"_argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'''\n Present the user with a simple CLI menu, and return the option chosen.\n The menu is presented via stderr.\n The output is printed to stdout for piping.\n '''",
".",
"format",
"(",
"_VERSION",
")",
",",
"epilog",
"=",
"'''\n The default for the post prompt is \"Enter an option to continue: \".\n If --default-index is specified, the default option value will be printed\n in the post prompt as well.\n '''",
")",
"parser",
".",
"add_argument",
"(",
"'option'",
",",
"help",
"=",
"'The option(s) to present to the user.'",
",",
"nargs",
"=",
"'*'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"'-v'",
",",
"help",
"=",
"'Print the version and then exit'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--pre'",
",",
"'-p'",
",",
"help",
"=",
"'The pre-prompt/title/introduction to the menu. [%(default)s]'",
",",
"default",
"=",
"'Options:'",
",",
"metavar",
"=",
"'TEXT'",
")",
"parser",
".",
"add_argument",
"(",
"'--post'",
",",
"'-P'",
",",
"help",
"=",
"'The prompt presented to the user after the menu items.'",
",",
"default",
"=",
"_NO_ARG",
",",
"metavar",
"=",
"'TEXT'",
")",
"parser",
".",
"add_argument",
"(",
"'--default-index'",
",",
"'-d'",
",",
"help",
"=",
"'The index of the item to use as the default'",
",",
"type",
"=",
"int",
",",
"metavar",
"=",
"'INT'",
")",
"parser",
".",
"add_argument",
"(",
"'--indexed'",
",",
"'-i'",
",",
"help",
"=",
"'Print indices with the options, and allow the user to use them to choose.'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--insensitive'",
",",
"'-I'",
",",
"help",
"=",
"(",
"'Perform insensitive matching. Also drops any items that case-insensitively match'",
"+",
"' prior items.'",
")",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--fuzzy'",
",",
"'-f'",
",",
"help",
"=",
"'search for the individual words in the user input anywhere in the item strings.'",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--stdout'",
",",
"help",
"=",
"'Use stdout for interactive output (instead of the default: stderr).'",
",",
"action",
"=",
"'store_true'",
")",
"# parse options",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# argparse nargs is awkward. Translate to be a proper plural.",
"options",
"=",
"args",
".",
"option",
"# set the stream",
"stream",
"=",
"_sys",
".",
"stdout",
"if",
"args",
".",
"stdout",
"else",
"_sys",
".",
"stderr",
"# if version, print version and exit",
"if",
"args",
".",
"version",
":",
"stream",
".",
"write",
"(",
"'Pimento - v{}\\n'",
".",
"format",
"(",
"_VERSION",
")",
")",
"exit",
"(",
"0",
")",
"# read more options from stdin if there are are any",
"# but only if we're on a 'nix system with tty's",
"tty",
"=",
"'/dev/tty'",
"if",
"not",
"_sys",
".",
"stdin",
".",
"isatty",
"(",
")",
"and",
"_path",
".",
"exists",
"(",
"tty",
")",
":",
"if",
"_sys",
".",
"version_info",
".",
"major",
"==",
"3",
":",
"stream",
".",
"write",
"(",
"'[!] python3 input bug - tab completion not available\\n'",
")",
"stream",
".",
"write",
"(",
"'[!] python3 input bug - arrow support not available\\n'",
")",
"stream",
".",
"write",
"(",
"'[!] only known workaround is to not pipe in.\\n'",
")",
"options",
"+=",
"[",
"l",
".",
"rstrip",
"(",
")",
"for",
"l",
"in",
"_sys",
".",
"stdin",
"]",
"# switch to the main tty",
"# this solution (to being interactive after reading from pipe)",
"# comes from: https://stackoverflow.com/questions/6312819/pipes-and-prompts-in-python-cli-scripts",
"_sys",
".",
"stdin",
"=",
"open",
"(",
"tty",
")",
"# show the menu",
"try",
":",
"result",
"=",
"menu",
"(",
"options",
",",
"pre_prompt",
"=",
"args",
".",
"pre",
",",
"post_prompt",
"=",
"args",
".",
"post",
",",
"default_index",
"=",
"args",
".",
"default_index",
",",
"indexed",
"=",
"args",
".",
"indexed",
",",
"insensitive",
"=",
"args",
".",
"insensitive",
",",
"fuzzy",
"=",
"args",
".",
"fuzzy",
",",
"stream",
"=",
"stream",
")",
"# print the result (to stdout)",
"_sys",
".",
"stdout",
".",
"write",
"(",
"result",
"+",
"'\\n'",
")",
"except",
"KeyboardInterrupt",
":",
"_sys",
".",
"stderr",
".",
"write",
"(",
"\"\\nCTRL-C detected. Exiting.\\n\"",
")",
"_sys",
".",
"stderr",
".",
"flush",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"_sys",
".",
"stdout",
".",
"write",
"(",
"\"ERROR: {}\\n\"",
".",
"format",
"(",
"e",
")",
")",
"exit",
"(",
"1",
")"
]
| CLI interface | [
"CLI",
"interface"
]
| python | train | 34.509259 |
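A sketch of the library call the CLI wraps, assuming pimento exposes menu with the keyword names used above:

from pimento import menu

# Roughly mirrors: pimento --indexed --fuzzy red blue green
choice = menu(
    ['red', 'blue', 'green'],
    pre_prompt='Options:',
    indexed=True,  # print indices and accept them as input
    fuzzy=True,    # match input words anywhere in the item strings
)
print(choice)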
joferkington/mpldatacursor | mpldatacursor/datacursor.py | https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/datacursor.py#L658-L719 | def _select(self, event):
"""This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
        this datacursor and fire a pick event if the mouse is over a managed
artist."""
        def event_axes_data(event, ax):
            """Creates a new event that will have xdata and ydata based on *ax*."""
# We need to redefine event.xdata and event.ydata for twinned axes
# to work correctly
point = event.x, event.y
x, y = ax.transData.inverted().transform_point(point)
event = copy.copy(event)
event.xdata, event.ydata = x, y
return event
def contains(artist, event):
"""Need to ensure we don't trigger a pick event for axes in a
different figure. Otherwise, picking on one figure will trigger a
datacursor in another figure."""
if event.canvas is artist.figure.canvas:
return artist.contains(event)
else:
return False, {}
# If we're on top of an annotation box, hide it if right-clicked or
# do nothing if we're in draggable mode
for anno in list(self.annotations.values()):
fixed_event = event_axes_data(event, anno.axes)
if contains(anno, fixed_event)[0]:
if event.button == self.hide_button:
self._hide_box(anno)
elif self.draggable:
return
for artist in self.artists:
fixed_event = event_axes_data(event, artist.axes)
inside, info = contains(artist, fixed_event)
if inside:
fig = artist.figure
new_event = PickEvent('pick_event', fig.canvas, fixed_event,
artist, **info)
self(new_event)
# Only fire a single pick event for one mouseevent. Otherwise
# we'll need timers, etc to avoid multiple calls
break
# Not hovering over anything...
if self.hover:
artists = itertools.chain(self.artists, self.annotations.values())
over_something = [contains(artist, event)[0] for artist in artists]
if not any(over_something):
self.hide() | [
"def",
"_select",
"(",
"self",
",",
"event",
")",
":",
"def",
"event_axes_data",
"(",
"event",
",",
"ax",
")",
":",
"\"\"\"Creates a new event will have xdata and ydata based on *ax*.\"\"\"",
"# We need to redefine event.xdata and event.ydata for twinned axes",
"# to work correctly",
"point",
"=",
"event",
".",
"x",
",",
"event",
".",
"y",
"x",
",",
"y",
"=",
"ax",
".",
"transData",
".",
"inverted",
"(",
")",
".",
"transform_point",
"(",
"point",
")",
"event",
"=",
"copy",
".",
"copy",
"(",
"event",
")",
"event",
".",
"xdata",
",",
"event",
".",
"ydata",
"=",
"x",
",",
"y",
"return",
"event",
"def",
"contains",
"(",
"artist",
",",
"event",
")",
":",
"\"\"\"Need to ensure we don't trigger a pick event for axes in a\n different figure. Otherwise, picking on one figure will trigger a\n datacursor in another figure.\"\"\"",
"if",
"event",
".",
"canvas",
"is",
"artist",
".",
"figure",
".",
"canvas",
":",
"return",
"artist",
".",
"contains",
"(",
"event",
")",
"else",
":",
"return",
"False",
",",
"{",
"}",
"# If we're on top of an annotation box, hide it if right-clicked or",
"# do nothing if we're in draggable mode",
"for",
"anno",
"in",
"list",
"(",
"self",
".",
"annotations",
".",
"values",
"(",
")",
")",
":",
"fixed_event",
"=",
"event_axes_data",
"(",
"event",
",",
"anno",
".",
"axes",
")",
"if",
"contains",
"(",
"anno",
",",
"fixed_event",
")",
"[",
"0",
"]",
":",
"if",
"event",
".",
"button",
"==",
"self",
".",
"hide_button",
":",
"self",
".",
"_hide_box",
"(",
"anno",
")",
"elif",
"self",
".",
"draggable",
":",
"return",
"for",
"artist",
"in",
"self",
".",
"artists",
":",
"fixed_event",
"=",
"event_axes_data",
"(",
"event",
",",
"artist",
".",
"axes",
")",
"inside",
",",
"info",
"=",
"contains",
"(",
"artist",
",",
"fixed_event",
")",
"if",
"inside",
":",
"fig",
"=",
"artist",
".",
"figure",
"new_event",
"=",
"PickEvent",
"(",
"'pick_event'",
",",
"fig",
".",
"canvas",
",",
"fixed_event",
",",
"artist",
",",
"*",
"*",
"info",
")",
"self",
"(",
"new_event",
")",
"# Only fire a single pick event for one mouseevent. Otherwise",
"# we'll need timers, etc to avoid multiple calls",
"break",
"# Not hovering over anything...",
"if",
"self",
".",
"hover",
":",
"artists",
"=",
"itertools",
".",
"chain",
"(",
"self",
".",
"artists",
",",
"self",
".",
"annotations",
".",
"values",
"(",
")",
")",
"over_something",
"=",
"[",
"contains",
"(",
"artist",
",",
"event",
")",
"[",
"0",
"]",
"for",
"artist",
"in",
"artists",
"]",
"if",
"not",
"any",
"(",
"over_something",
")",
":",
"self",
".",
"hide",
"(",
")"
]
| This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
        this datacursor and fire a pick event if the mouse is over a managed
artist. | [
"This",
"is",
"basically",
"a",
"proxy",
"to",
"trigger",
"a",
"pick",
"event",
".",
"This",
"function",
"is",
"connected",
"to",
"either",
"a",
"mouse",
"motion",
"or",
"mouse",
"button",
"event",
"(",
"see",
"self",
".",
"enable",
")",
"depending",
"on",
"self",
".",
"hover",
".",
"If",
"we",
"re",
"over",
"a",
"point",
"it",
"fires",
"a",
"pick",
"event",
"."
]
| python | train | 44.580645 |
malramsay64/experi | src/experi/run.py | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L90-L114 | def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) | [
"def",
"iterator_chain",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
"=",
"None",
")",
"->",
"Iterable",
"[",
"VarMatrix",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Yielding from append iterator\"",
")",
"if",
"not",
"isinstance",
"(",
"variables",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"f\"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}\"",
")",
"# Create a single list containing all the values",
"yield",
"list",
"(",
"chain",
".",
"from_iterable",
"(",
"variable_matrix",
"(",
"item",
",",
"parent",
",",
"\"product\"",
")",
"for",
"item",
"in",
"variables",
")",
")"
]
| This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused | [
"This",
"successively",
"appends",
"each",
"element",
"of",
"an",
"array",
"to",
"a",
"single",
"list",
"of",
"values",
"."
]
| python | train | 38.24 |
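The heart of the function is itertools.chain.from_iterable, which flattens exactly one level of nesting; a standalone illustration:

from itertools import chain

per_item = [[{'a': 1}], [{'a': 2}, {'a': 3}]]  # one sub-list per list entry
combined = list(chain.from_iterable(per_item))
print(combined)  # [{'a': 1}, {'a': 2}, {'a': 3}]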
bcbio/bcbio-nextgen | bcbio/variation/scalpel.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/scalpel.py#L77-L90 | def run_scalpel(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run Scalpel indel calling, either paired tumor/normal or germline calling.
"""
if region is None:
message = ("A region must be provided for Scalpel")
raise ValueError(message)
if is_paired_analysis(align_bams, items):
call_file = _run_scalpel_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
call_file = _run_scalpel_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file | [
"def",
"run_scalpel",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
"=",
"None",
",",
"out_file",
"=",
"None",
")",
":",
"if",
"region",
"is",
"None",
":",
"message",
"=",
"(",
"\"A region must be provided for Scalpel\"",
")",
"raise",
"ValueError",
"(",
"message",
")",
"if",
"is_paired_analysis",
"(",
"align_bams",
",",
"items",
")",
":",
"call_file",
"=",
"_run_scalpel_paired",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
",",
"out_file",
")",
"else",
":",
"call_file",
"=",
"_run_scalpel_caller",
"(",
"align_bams",
",",
"items",
",",
"ref_file",
",",
"assoc_files",
",",
"region",
",",
"out_file",
")",
"return",
"call_file"
]
| Run Scalpel indel calling, either paired tumor/normal or germline calling. | [
"Run",
"Scalpel",
"indel",
"calling",
"either",
"paired",
"tumor",
"/",
"normal",
"or",
"germline",
"calling",
"."
]
| python | train | 47.071429 |
hydpy-dev/hydpy | hydpy/core/propertytools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L536-L538 | def call_fset(self, obj, value) -> None:
"""Store the given custom value and call the setter function."""
vars(obj)[self.name] = self.fset(obj, value) | [
"def",
"call_fset",
"(",
"self",
",",
"obj",
",",
"value",
")",
"->",
"None",
":",
"vars",
"(",
"obj",
")",
"[",
"self",
".",
"name",
"]",
"=",
"self",
".",
"fset",
"(",
"obj",
",",
"value",
")"
]
| Store the given custom value and call the setter function. | [
"Store",
"the",
"given",
"custom",
"value",
"and",
"call",
"the",
"setter",
"function",
"."
]
| python | train | 54.666667 |
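The vars(obj)[self.name] idiom caches the setter's result in the instance dictionary. A minimal descriptor sketch using the same storage pattern (class names here are illustrative, not hydpy's):

class Checked:
    # Data descriptor: convert on set, then cache in the instance dict.
    def __set_name__(self, owner, name):
        self.name = name

    def __set__(self, obj, value):
        vars(obj)[self.name] = float(value)

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        return vars(obj)[self.name]

class Node:
    value = Checked()

n = Node()
n.value = '3'
print(n.value)  # 3.0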
studionow/pybrightcove | pybrightcove/playlist.py | https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/playlist.py#L161-L168 | def delete(self, cascade=False):
"""
Deletes this playlist.
"""
if self.id:
self.connection.post('delete_playlist', playlist_id=self.id,
cascade=cascade)
self.id = None | [
"def",
"delete",
"(",
"self",
",",
"cascade",
"=",
"False",
")",
":",
"if",
"self",
".",
"id",
":",
"self",
".",
"connection",
".",
"post",
"(",
"'delete_playlist'",
",",
"playlist_id",
"=",
"self",
".",
"id",
",",
"cascade",
"=",
"cascade",
")",
"self",
".",
"id",
"=",
"None"
]
| Deletes this playlist. | [
"Deletes",
"this",
"playlist",
"."
]
| python | train | 29.125 |
andreasjansson/head-in-the-clouds | headintheclouds/ec2.py | https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/ec2.py#L42-L55 | def mount_volume(volume, device='/dev/xvdf', mountpoint='/mnt/data', fstype='ext4'):
'''
Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4
'''
_ec2().attach_volume(volume, _host_node()['id'], device)
time.sleep(1)
sudo('mkdir -p "%s"' % mountpoint)
sudo('mount -t "%s" "%s" "%s"' % (fstype, device, mountpoint)) | [
"def",
"mount_volume",
"(",
"volume",
",",
"device",
"=",
"'/dev/xvdf'",
",",
"mountpoint",
"=",
"'/mnt/data'",
",",
"fstype",
"=",
"'ext4'",
")",
":",
"_ec2",
"(",
")",
".",
"attach_volume",
"(",
"volume",
",",
"_host_node",
"(",
")",
"[",
"'id'",
"]",
",",
"device",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"sudo",
"(",
"'mkdir -p \"%s\"'",
"%",
"mountpoint",
")",
"sudo",
"(",
"'mount -t \"%s\" \"%s\" \"%s\"'",
"%",
"(",
"fstype",
",",
"device",
",",
"mountpoint",
")",
")"
]
| Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4 | [
"Mount",
"an",
"EBS",
"volume"
]
| python | train | 33 |
datadesk/python-documentcloud | documentcloud/__init__.py | https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L535-L549 | def get_or_create_by_title(self, title):
"""
Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
brand new.
"""
try:
obj = self.get_by_title(title)
created = False
except DoesNotExistError:
obj = self.create(title=title)
created = True
return obj, created | [
"def",
"get_or_create_by_title",
"(",
"self",
",",
"title",
")",
":",
"try",
":",
"obj",
"=",
"self",
".",
"get_by_title",
"(",
"title",
")",
"created",
"=",
"False",
"except",
"DoesNotExistError",
":",
"obj",
"=",
"self",
".",
"create",
"(",
"title",
"=",
"title",
")",
"created",
"=",
"True",
"return",
"obj",
",",
"created"
]
| Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
brand new. | [
"Fetch",
"a",
"title",
"if",
"it",
"exists",
".",
"Create",
"it",
"if",
"it",
"doesn",
"t",
"."
]
| python | train | 33.2 |
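Hypothetical client-side usage, assuming an authenticated python-documentcloud client (credentials are placeholders):

from documentcloud import DocumentCloud

client = DocumentCloud('user@example.com', 'password')
project, created = client.projects.get_or_create_by_title('Pulitzer Material')
if created:
    print('Created a brand-new project:', project.title)
else:
    print('Reusing existing project:', project.title)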
spacetelescope/drizzlepac | drizzlepac/hlautils/astrometric_utils.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/hlautils/astrometric_utils.py#L987-L1109 | def find_hist2d_offset(filename, reference, refwcs=None, refnames=['ra', 'dec'],
match_tolerance=5., chip_catalog=True, search_radius=15.0,
min_match=10, classify=True):
"""Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
        accuracy source positions to be compared to the astrometric catalog.
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
        astrometric catalog positions that results in the largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip.
"""
# Interpret input image to generate initial source catalog and WCS
if isinstance(filename, str):
image = pf.open(filename)
rootname = filename.split("_")[0]
else:
image = filename
rootname = image[0].header['rootname']
# check to see whether reference catalog can be found
if not os.path.exists(reference):
log.info("Could not find input reference catalog: {}".format(reference))
raise FileNotFoundError
# Extract reference WCS from image
if refwcs is None:
refwcs = build_self_reference(image, clean_wcs=True)
log.info("Computing offset for field-of-view defined by:")
log.info(refwcs)
# read in reference catalog
if isinstance(reference, str):
refcat = ascii.read(reference)
else:
refcat = reference
log.info("\nRead in reference catalog with {} sources.".format(len(refcat)))
ref_ra = refcat[refnames[0]]
ref_dec = refcat[refnames[1]]
# Build source catalog for entire image
img_cat = generate_source_catalog(image, refwcs, output=chip_catalog, classify=classify)
img_cat.write(filename.replace(".fits", "_xy.cat"), format='ascii.no_header',
overwrite=True)
# Retrieve source XY positions in reference frame
seg_xy = np.column_stack((img_cat['xcentroid'], img_cat['ycentroid']))
seg_xy = seg_xy[~np.isnan(seg_xy[:, 0])]
# Translate reference catalog positions into input image coordinate frame
xref, yref = refwcs.all_world2pix(ref_ra, ref_dec, 1)
# look for only sources within the viewable area of the exposure to
# determine the offset
xref, yref = within_footprint(image, refwcs, xref, yref)
ref_xy = np.column_stack((xref, yref))
log.info("\nWorking with {} astrometric sources for this field".format(len(ref_xy)))
# write out astrometric reference catalog that was actually used
ref_ra_img, ref_dec_img = refwcs.all_pix2world(xref, yref, 1)
ref_tab = Table([ref_ra_img, ref_dec_img, xref, yref], names=['ra', 'dec', 'x', 'y'])
ref_tab.write(reference.replace('.cat', '_{}.cat'.format(rootname)),
format='ascii.fast_commented_header', overwrite=True)
searchrad = search_radius / refwcs.pscale
# Use 2d-Histogram builder from drizzlepac.tweakreg -- for demo only...
xp, yp, nmatches, zpqual = build_xy_zeropoint(seg_xy, ref_xy,
searchrad=searchrad,
histplot=False, figure_id=1,
plotname=None, interactive=False)
hist2d_offset = (xp, yp)
log.info('best offset {} based on {} cross-matches'.format(hist2d_offset, nmatches))
return hist2d_offset, seg_xy, ref_xy | [
"def",
"find_hist2d_offset",
"(",
"filename",
",",
"reference",
",",
"refwcs",
"=",
"None",
",",
"refnames",
"=",
"[",
"'ra'",
",",
"'dec'",
"]",
",",
"match_tolerance",
"=",
"5.",
",",
"chip_catalog",
"=",
"True",
",",
"search_radius",
"=",
"15.0",
",",
"min_match",
"=",
"10",
",",
"classify",
"=",
"True",
")",
":",
"# Interpret input image to generate initial source catalog and WCS",
"if",
"isinstance",
"(",
"filename",
",",
"str",
")",
":",
"image",
"=",
"pf",
".",
"open",
"(",
"filename",
")",
"rootname",
"=",
"filename",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
"else",
":",
"image",
"=",
"filename",
"rootname",
"=",
"image",
"[",
"0",
"]",
".",
"header",
"[",
"'rootname'",
"]",
"# check to see whether reference catalog can be found",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"reference",
")",
":",
"log",
".",
"info",
"(",
"\"Could not find input reference catalog: {}\"",
".",
"format",
"(",
"reference",
")",
")",
"raise",
"FileNotFoundError",
"# Extract reference WCS from image",
"if",
"refwcs",
"is",
"None",
":",
"refwcs",
"=",
"build_self_reference",
"(",
"image",
",",
"clean_wcs",
"=",
"True",
")",
"log",
".",
"info",
"(",
"\"Computing offset for field-of-view defined by:\"",
")",
"log",
".",
"info",
"(",
"refwcs",
")",
"# read in reference catalog",
"if",
"isinstance",
"(",
"reference",
",",
"str",
")",
":",
"refcat",
"=",
"ascii",
".",
"read",
"(",
"reference",
")",
"else",
":",
"refcat",
"=",
"reference",
"log",
".",
"info",
"(",
"\"\\nRead in reference catalog with {} sources.\"",
".",
"format",
"(",
"len",
"(",
"refcat",
")",
")",
")",
"ref_ra",
"=",
"refcat",
"[",
"refnames",
"[",
"0",
"]",
"]",
"ref_dec",
"=",
"refcat",
"[",
"refnames",
"[",
"1",
"]",
"]",
"# Build source catalog for entire image",
"img_cat",
"=",
"generate_source_catalog",
"(",
"image",
",",
"refwcs",
",",
"output",
"=",
"chip_catalog",
",",
"classify",
"=",
"classify",
")",
"img_cat",
".",
"write",
"(",
"filename",
".",
"replace",
"(",
"\".fits\"",
",",
"\"_xy.cat\"",
")",
",",
"format",
"=",
"'ascii.no_header'",
",",
"overwrite",
"=",
"True",
")",
"# Retrieve source XY positions in reference frame",
"seg_xy",
"=",
"np",
".",
"column_stack",
"(",
"(",
"img_cat",
"[",
"'xcentroid'",
"]",
",",
"img_cat",
"[",
"'ycentroid'",
"]",
")",
")",
"seg_xy",
"=",
"seg_xy",
"[",
"~",
"np",
".",
"isnan",
"(",
"seg_xy",
"[",
":",
",",
"0",
"]",
")",
"]",
"# Translate reference catalog positions into input image coordinate frame",
"xref",
",",
"yref",
"=",
"refwcs",
".",
"all_world2pix",
"(",
"ref_ra",
",",
"ref_dec",
",",
"1",
")",
"# look for only sources within the viewable area of the exposure to",
"# determine the offset",
"xref",
",",
"yref",
"=",
"within_footprint",
"(",
"image",
",",
"refwcs",
",",
"xref",
",",
"yref",
")",
"ref_xy",
"=",
"np",
".",
"column_stack",
"(",
"(",
"xref",
",",
"yref",
")",
")",
"log",
".",
"info",
"(",
"\"\\nWorking with {} astrometric sources for this field\"",
".",
"format",
"(",
"len",
"(",
"ref_xy",
")",
")",
")",
"# write out astrometric reference catalog that was actually used",
"ref_ra_img",
",",
"ref_dec_img",
"=",
"refwcs",
".",
"all_pix2world",
"(",
"xref",
",",
"yref",
",",
"1",
")",
"ref_tab",
"=",
"Table",
"(",
"[",
"ref_ra_img",
",",
"ref_dec_img",
",",
"xref",
",",
"yref",
"]",
",",
"names",
"=",
"[",
"'ra'",
",",
"'dec'",
",",
"'x'",
",",
"'y'",
"]",
")",
"ref_tab",
".",
"write",
"(",
"reference",
".",
"replace",
"(",
"'.cat'",
",",
"'_{}.cat'",
".",
"format",
"(",
"rootname",
")",
")",
",",
"format",
"=",
"'ascii.fast_commented_header'",
",",
"overwrite",
"=",
"True",
")",
"searchrad",
"=",
"search_radius",
"/",
"refwcs",
".",
"pscale",
"# Use 2d-Histogram builder from drizzlepac.tweakreg -- for demo only...",
"xp",
",",
"yp",
",",
"nmatches",
",",
"zpqual",
"=",
"build_xy_zeropoint",
"(",
"seg_xy",
",",
"ref_xy",
",",
"searchrad",
"=",
"searchrad",
",",
"histplot",
"=",
"False",
",",
"figure_id",
"=",
"1",
",",
"plotname",
"=",
"None",
",",
"interactive",
"=",
"False",
")",
"hist2d_offset",
"=",
"(",
"xp",
",",
"yp",
")",
"log",
".",
"info",
"(",
"'best offset {} based on {} cross-matches'",
".",
"format",
"(",
"hist2d_offset",
",",
"nmatches",
")",
")",
"return",
"hist2d_offset",
",",
"seg_xy",
",",
"ref_xy"
]
| Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
        accuracy source positions to be compared to the astrometric catalog.
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
        astrometric catalog positions that results in the largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip. | [
"Iteratively",
"look",
"for",
"the",
"best",
"cross",
"-",
"match",
"between",
"the",
"catalog",
"and",
"ref",
"."
]
| python | train | 42.756098 |
dhain/potpy | potpy/router.py | https://github.com/dhain/potpy/blob/e39a5a84f763fbf144b07a620afb02a5ff3741c9/potpy/router.py#L250-L261 | def add(self, match, handler):
"""Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
matching calls. If not a Route instance, will be wrapped in one.
"""
self.routes.append((match, (
Route(handler) if not isinstance(handler, Route)
else handler
))) | [
"def",
"add",
"(",
"self",
",",
"match",
",",
"handler",
")",
":",
"self",
".",
"routes",
".",
"append",
"(",
"(",
"match",
",",
"(",
"Route",
"(",
"handler",
")",
"if",
"not",
"isinstance",
"(",
"handler",
",",
"Route",
")",
"else",
"handler",
")",
")",
")"
]
| Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
matching calls. If not a Route instance, will be wrapped in one. | [
"Register",
"a",
"handler",
"with",
"the",
"Router",
"."
]
| python | train | 41.166667 |
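A hedged registration sketch; what `match` means (a name, pattern, or type) depends on the concrete Router subclass, so the strings here are assumptions:

def show_index():
    return 'index page'

def show_detail():
    return 'detail page'

router = Router()                         # in practice, a concrete subclass
router.add('index', show_index)           # bare callable is wrapped in a Route
router.add('detail', Route(show_detail))  # a prebuilt Route is used as-is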
CZ-NIC/yangson | yangson/schemadata.py | https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemadata.py#L459-L466 | def derived_from_all(self, identities: List[QualName]) -> MutableSet[QualName]:
        """Return the set of identities transitively derived from all `identities`."""
if not identities:
return set()
res = self.derived_from(identities[0])
for id in identities[1:]:
res &= self.derived_from(id)
return res | [
"def",
"derived_from_all",
"(",
"self",
",",
"identities",
":",
"List",
"[",
"QualName",
"]",
")",
"->",
"MutableSet",
"[",
"QualName",
"]",
":",
"if",
"not",
"identities",
":",
"return",
"set",
"(",
")",
"res",
"=",
"self",
".",
"derived_from",
"(",
"identities",
"[",
"0",
"]",
")",
"for",
"id",
"in",
"identities",
"[",
"1",
":",
"]",
":",
"res",
"&=",
"self",
".",
"derived_from",
"(",
"id",
")",
"return",
"res"
]
 | Return the set of identities transitively derived from all `identities`. | [
"Return",
"list",
"of",
"identities",
"transitively",
"derived",
"from",
"all",
"identity",
"."
]
| python | train | 43.375 |
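The method is an intersection fold: seed with the identities derived from the first input, then narrow with &= for each remaining one. The same shape with plain sets and a hypothetical identity mapping:

derived = {
    'a': {('m', 'x'), ('m', 'y')},
    'b': {('m', 'x'), ('m', 'z')},
}  # hypothetical identity -> derived-identity sets

names = list(derived)
common = set(derived[names[0]])
for name in names[1:]:
    common &= derived[name]  # keep identities derived from *all* inputs
print(common)  # {('m', 'x')}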
tanghaibao/jcvi | jcvi/projects/synfind.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/synfind.py#L52-L85 | def grasstruth(args):
"""
%prog grasstruth james-pan-grass.txt
Prepare truth pairs for 4 grasses.
"""
p = OptionParser(grasstruth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
james, = args
fp = open(james)
pairs = set()
for row in fp:
atoms = row.split()
genes = []
idx = {}
for i, a in enumerate(atoms):
aa = a.split("||")
for ma in aa:
idx[ma] = i
genes.extend(aa)
genes = [x for x in genes if ":" not in x]
Os = [x for x in genes if x.startswith("Os")]
for o in Os:
for g in genes:
if idx[o] == idx[g]:
continue
pairs.add(tuple(sorted((o, g))))
for a, b in sorted(pairs):
print("\t".join((a, b))) | [
"def",
"grasstruth",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"grasstruth",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"james",
",",
"=",
"args",
"fp",
"=",
"open",
"(",
"james",
")",
"pairs",
"=",
"set",
"(",
")",
"for",
"row",
"in",
"fp",
":",
"atoms",
"=",
"row",
".",
"split",
"(",
")",
"genes",
"=",
"[",
"]",
"idx",
"=",
"{",
"}",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"atoms",
")",
":",
"aa",
"=",
"a",
".",
"split",
"(",
"\"||\"",
")",
"for",
"ma",
"in",
"aa",
":",
"idx",
"[",
"ma",
"]",
"=",
"i",
"genes",
".",
"extend",
"(",
"aa",
")",
"genes",
"=",
"[",
"x",
"for",
"x",
"in",
"genes",
"if",
"\":\"",
"not",
"in",
"x",
"]",
"Os",
"=",
"[",
"x",
"for",
"x",
"in",
"genes",
"if",
"x",
".",
"startswith",
"(",
"\"Os\"",
")",
"]",
"for",
"o",
"in",
"Os",
":",
"for",
"g",
"in",
"genes",
":",
"if",
"idx",
"[",
"o",
"]",
"==",
"idx",
"[",
"g",
"]",
":",
"continue",
"pairs",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"o",
",",
"g",
")",
")",
")",
")",
"for",
"a",
",",
"b",
"in",
"sorted",
"(",
"pairs",
")",
":",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"(",
"a",
",",
"b",
")",
")",
")"
]
| %prog grasstruth james-pan-grass.txt
Prepare truth pairs for 4 grasses. | [
"%prog",
"grasstruth",
"james",
"-",
"pan",
"-",
"grass",
".",
"txt"
]
| python | train | 25 |
exosite-labs/pyonep | pyonep/provision.py | https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/provision.py#L237-L248 | def content_list(self, key, model):
"""Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model:
"""
path = PROVISION_MANAGE_CONTENT + model + '/'
return self._request(path, key, '', 'GET', self._manage_by_cik) | [
"def",
"content_list",
"(",
"self",
",",
"key",
",",
"model",
")",
":",
"path",
"=",
"PROVISION_MANAGE_CONTENT",
"+",
"model",
"+",
"'/'",
"return",
"self",
".",
"_request",
"(",
"path",
",",
"key",
",",
"''",
",",
"'GET'",
",",
"self",
".",
"_manage_by_cik",
")"
]
| Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model: | [
"Returns",
"the",
"list",
"of",
"content",
"IDs",
"for",
"a",
"given",
"model",
"."
]
| python | train | 35.083333 |
Komnomnomnom/swigibpy | swigibpy.py | https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L1220-L1222 | def exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
return _swigibpy.EClient_exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override) | [
"def",
"exerciseOptions",
"(",
"self",
",",
"id",
",",
"contract",
",",
"exerciseAction",
",",
"exerciseQuantity",
",",
"account",
",",
"override",
")",
":",
"return",
"_swigibpy",
".",
"EClient_exerciseOptions",
"(",
"self",
",",
"id",
",",
"contract",
",",
"exerciseAction",
",",
"exerciseQuantity",
",",
"account",
",",
"override",
")"
]
| exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override) | [
"exerciseOptions",
"(",
"EClient",
"self",
"TickerId",
"id",
"Contract",
"contract",
"int",
"exerciseAction",
"int",
"exerciseQuantity",
"IBString",
"const",
"&",
"account",
"int",
"override",
")"
]
| python | train | 123.666667 |
stevearc/dql | dql/engine.py | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L974-L992 | def execute(self, fragment, pretty_format=True):
"""
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
"""
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None | [
"def",
"execute",
"(",
"self",
",",
"fragment",
",",
"pretty_format",
"=",
"True",
")",
":",
"self",
".",
"fragments",
"=",
"(",
"self",
".",
"fragments",
"+",
"\"\\n\"",
"+",
"fragment",
")",
".",
"lstrip",
"(",
")",
"try",
":",
"line_parser",
".",
"parseString",
"(",
"self",
".",
"fragments",
")",
"except",
"ParseException",
":",
"pass",
"else",
":",
"self",
".",
"last_query",
"=",
"self",
".",
"fragments",
".",
"strip",
"(",
")",
"self",
".",
"fragments",
"=",
"\"\"",
"return",
"super",
"(",
"FragmentEngine",
",",
"self",
")",
".",
"execute",
"(",
"self",
".",
"last_query",
",",
"pretty_format",
")",
"return",
"None"
]
| Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None. | [
"Run",
"or",
"aggregate",
"a",
"query",
"fragment"
]
| python | train | 34.315789 |
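A hypothetical interactive flow, assuming a FragmentEngine that is already connected: an incomplete fragment is buffered and returns None, while a parseable statement flushes the buffer and runs.

engine = FragmentEngine()  # assumes connection setup done elsewhere
assert engine.execute('SELECT *') is None  # fragment buffered, nothing run yet
result = engine.execute('FROM mytable;')   # parses now; the full query executes
print(engine.last_query)                   # 'SELECT *\nFROM mytable;'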
aiven/pghoard | pghoard/pghoard.py | https://github.com/aiven/pghoard/blob/2994165d4ef3ff7a5669a2527346bcbfb5b3bd8a/pghoard/pghoard.py#L346-L392 | def startup_walk_for_missed_files(self):
"""Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
        compressed, as well as the files we have compressed but not yet uploaded, and process them."""
for site in self.config["backup_sites"]:
compressed_xlog_path, _ = self.create_backup_site_paths(site)
uncompressed_xlog_path = compressed_xlog_path + "_incoming"
# Process uncompressed files (ie WAL pg_receivexlog received)
for filename in os.listdir(uncompressed_xlog_path):
full_path = os.path.join(uncompressed_xlog_path, filename)
if not wal.WAL_RE.match(filename) and not wal.TIMELINE_RE.match(filename):
self.log.warning("Found invalid file %r from incoming xlog directory", full_path)
continue
compression_event = {
"delete_file_after_compression": True,
"full_path": full_path,
"site": site,
"src_path": "{}.partial",
"type": "MOVE",
}
self.log.debug("Found: %r when starting up, adding to compression queue", compression_event)
self.compression_queue.put(compression_event)
# Process compressed files (ie things we've processed but not yet uploaded)
for filename in os.listdir(compressed_xlog_path):
if filename.endswith(".metadata"):
continue # silently ignore .metadata files, they're expected and processed below
full_path = os.path.join(compressed_xlog_path, filename)
metadata_path = full_path + ".metadata"
is_xlog = wal.WAL_RE.match(filename)
is_timeline = wal.TIMELINE_RE.match(filename)
if not ((is_xlog or is_timeline) and os.path.exists(metadata_path)):
self.log.warning("Found invalid file %r from compressed xlog directory", full_path)
continue
with open(metadata_path, "r") as fp:
metadata = json.load(fp)
transfer_event = {
"file_size": os.path.getsize(full_path),
"filetype": "xlog" if is_xlog else "timeline",
"local_path": full_path,
"metadata": metadata,
"site": site,
"type": "UPLOAD",
}
self.log.debug("Found: %r when starting up, adding to transfer queue", transfer_event)
self.transfer_queue.put(transfer_event) | [
"def",
"startup_walk_for_missed_files",
"(",
"self",
")",
":",
"for",
"site",
"in",
"self",
".",
"config",
"[",
"\"backup_sites\"",
"]",
":",
"compressed_xlog_path",
",",
"_",
"=",
"self",
".",
"create_backup_site_paths",
"(",
"site",
")",
"uncompressed_xlog_path",
"=",
"compressed_xlog_path",
"+",
"\"_incoming\"",
"# Process uncompressed files (ie WAL pg_receivexlog received)",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"uncompressed_xlog_path",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"uncompressed_xlog_path",
",",
"filename",
")",
"if",
"not",
"wal",
".",
"WAL_RE",
".",
"match",
"(",
"filename",
")",
"and",
"not",
"wal",
".",
"TIMELINE_RE",
".",
"match",
"(",
"filename",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Found invalid file %r from incoming xlog directory\"",
",",
"full_path",
")",
"continue",
"compression_event",
"=",
"{",
"\"delete_file_after_compression\"",
":",
"True",
",",
"\"full_path\"",
":",
"full_path",
",",
"\"site\"",
":",
"site",
",",
"\"src_path\"",
":",
"\"{}.partial\"",
",",
"\"type\"",
":",
"\"MOVE\"",
",",
"}",
"self",
".",
"log",
".",
"debug",
"(",
"\"Found: %r when starting up, adding to compression queue\"",
",",
"compression_event",
")",
"self",
".",
"compression_queue",
".",
"put",
"(",
"compression_event",
")",
"# Process compressed files (ie things we've processed but not yet uploaded)",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"compressed_xlog_path",
")",
":",
"if",
"filename",
".",
"endswith",
"(",
"\".metadata\"",
")",
":",
"continue",
"# silently ignore .metadata files, they're expected and processed below",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"compressed_xlog_path",
",",
"filename",
")",
"metadata_path",
"=",
"full_path",
"+",
"\".metadata\"",
"is_xlog",
"=",
"wal",
".",
"WAL_RE",
".",
"match",
"(",
"filename",
")",
"is_timeline",
"=",
"wal",
".",
"TIMELINE_RE",
".",
"match",
"(",
"filename",
")",
"if",
"not",
"(",
"(",
"is_xlog",
"or",
"is_timeline",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"metadata_path",
")",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Found invalid file %r from compressed xlog directory\"",
",",
"full_path",
")",
"continue",
"with",
"open",
"(",
"metadata_path",
",",
"\"r\"",
")",
"as",
"fp",
":",
"metadata",
"=",
"json",
".",
"load",
"(",
"fp",
")",
"transfer_event",
"=",
"{",
"\"file_size\"",
":",
"os",
".",
"path",
".",
"getsize",
"(",
"full_path",
")",
",",
"\"filetype\"",
":",
"\"xlog\"",
"if",
"is_xlog",
"else",
"\"timeline\"",
",",
"\"local_path\"",
":",
"full_path",
",",
"\"metadata\"",
":",
"metadata",
",",
"\"site\"",
":",
"site",
",",
"\"type\"",
":",
"\"UPLOAD\"",
",",
"}",
"self",
".",
"log",
".",
"debug",
"(",
"\"Found: %r when starting up, adding to transfer queue\"",
",",
"transfer_event",
")",
"self",
".",
"transfer_queue",
".",
"put",
"(",
"transfer_event",
")"
]
| Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
        compressed, as well as the files we have compressed but not yet uploaded, and process them. | [
"Check",
"xlog",
"and",
"xlog_incoming",
"directories",
"for",
"files",
"that",
"receivexlog",
"has",
"received",
"but",
"not",
"yet",
"compressed",
"as",
"well",
"as",
"the",
"files",
"we",
"have",
"compressed",
"but",
"not",
"yet",
"uploaded",
"and",
"process",
"them",
"."
]
| python | train | 56.170213 |
saimn/sigal | sigal/video.py | https://github.com/saimn/sigal/blob/912ca39991355d358dc85fd55c7aeabdd7acc386/sigal/video.py#L38-L59 | def check_subprocess(cmd, source, outname):
"""Run the command to resize the video and remove the output file if the
processing fails.
"""
logger = logging.getLogger(__name__)
try:
res = subprocess.run(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except KeyboardInterrupt:
logger.debug('Process terminated, removing file %s', outname)
if os.path.isfile(outname):
os.remove(outname)
raise
if res.returncode:
logger.debug('STDOUT:\n %s', res.stdout.decode('utf8'))
logger.debug('STDERR:\n %s', res.stderr.decode('utf8'))
if os.path.isfile(outname):
logger.debug('Removing file %s', outname)
os.remove(outname)
raise SubprocessException('Failed to process ' + source) | [
"def",
"check_subprocess",
"(",
"cmd",
",",
"source",
",",
"outname",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"try",
":",
"res",
"=",
"subprocess",
".",
"run",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"except",
"KeyboardInterrupt",
":",
"logger",
".",
"debug",
"(",
"'Process terminated, removing file %s'",
",",
"outname",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"outname",
")",
":",
"os",
".",
"remove",
"(",
"outname",
")",
"raise",
"if",
"res",
".",
"returncode",
":",
"logger",
".",
"debug",
"(",
"'STDOUT:\\n %s'",
",",
"res",
".",
"stdout",
".",
"decode",
"(",
"'utf8'",
")",
")",
"logger",
".",
"debug",
"(",
"'STDERR:\\n %s'",
",",
"res",
".",
"stderr",
".",
"decode",
"(",
"'utf8'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"outname",
")",
":",
"logger",
".",
"debug",
"(",
"'Removing file %s'",
",",
"outname",
")",
"os",
".",
"remove",
"(",
"outname",
")",
"raise",
"SubprocessException",
"(",
"'Failed to process '",
"+",
"source",
")"
]
| Run the command to resize the video and remove the output file if the
processing fails. | [
"Run",
"the",
"command",
"to",
"resize",
"the",
"video",
"and",
"remove",
"the",
"output",
"file",
"if",
"the",
"processing",
"fails",
"."
]
| python | valid | 36.818182 |
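A hedged call mirroring how sigal drives a video converter; the ffmpeg arguments are illustrative only:

source = 'clip.mov'
outname = 'clip.webm'
cmd = ['ffmpeg', '-i', source, '-y', outname]  # illustrative converter command
try:
    check_subprocess(cmd, source, outname)
except SubprocessException:
    print('conversion failed; partial output was already removed')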
Opentrons/opentrons | api/src/opentrons/protocol_api/contexts.py | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L185-L200 | def load_labware(
self, labware_obj: Labware,
location: types.DeckLocation) -> Labware:
""" Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str
"""
self._deck_layout[location] = labware_obj
return labware_obj | [
"def",
"load_labware",
"(",
"self",
",",
"labware_obj",
":",
"Labware",
",",
"location",
":",
"types",
".",
"DeckLocation",
")",
"->",
"Labware",
":",
"self",
".",
"_deck_layout",
"[",
"location",
"]",
"=",
"labware_obj",
"return",
"labware_obj"
]
| Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str | [
"Specify",
"the",
"presence",
"of",
"a",
"piece",
"of",
"labware",
"on",
"the",
"OT2",
"deck",
"."
]
| python | train | 39.125 |
iotile/coretools | iotilecore/iotile/core/utilities/intelhex/__init__.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/intelhex/__init__.py#L1145-L1158 | def data(offset, bytes):
"""Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
"""
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b) | [
"def",
"data",
"(",
"offset",
",",
"bytes",
")",
":",
"assert",
"0",
"<=",
"offset",
"<",
"65536",
"assert",
"0",
"<",
"len",
"(",
"bytes",
")",
"<",
"256",
"b",
"=",
"[",
"len",
"(",
"bytes",
")",
",",
"(",
"offset",
">>",
"8",
")",
"&",
"0x0FF",
",",
"offset",
"&",
"0x0FF",
",",
"0x00",
"]",
"+",
"bytes",
"return",
"Record",
".",
"_from_bytes",
"(",
"b",
")"
]
| Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record | [
"Return",
"Data",
"record",
".",
"This",
"constructs",
"the",
"full",
"record",
"including",
"the",
"length",
"information",
"the",
"record",
"type",
"(",
"0x00",
")",
"the",
"checksum",
"and",
"the",
"offset",
"."
]
| python | train | 39.5 |
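A hedged sketch of calling the `data` record builder above; the `Record` import path and the exact output formatting (leading ':', uppercase hex, trailing checksum) are assumptions based on the Intel HEX format rather than shown in the source:

from iotile.core.utilities.intelhex import Record

# byte count 0x02, address 0x0010, type 0x00, data 01 02, checksum 0xEB
rec = Record.data(0x0010, [0x01, 0x02])  # e.g. ':020010000102EB'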
Guake/guake | guake/guake_app.py | https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/guake_app.py#L437-L441 | def show_menu(self, status_icon, button, activate_time):
"""Show the tray icon menu.
"""
menu = self.get_widget('tray-menu')
menu.popup(None, None, None, Gtk.StatusIcon.position_menu, button, activate_time) | [
"def",
"show_menu",
"(",
"self",
",",
"status_icon",
",",
"button",
",",
"activate_time",
")",
":",
"menu",
"=",
"self",
".",
"get_widget",
"(",
"'tray-menu'",
")",
"menu",
".",
"popup",
"(",
"None",
",",
"None",
",",
"None",
",",
"Gtk",
".",
"StatusIcon",
".",
"position_menu",
",",
"button",
",",
"activate_time",
")"
]
| Show the tray icon menu. | [
"Show",
"the",
"tray",
"icon",
"menu",
"."
]
| python | train | 46.8 |
ldomic/lintools | lintools/draw.py | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/draw.py#L29-L48 | def draw_hydrogen_bonds(self,color="black"):
"""For each bond that has been determined to be important, a line gets drawn.
"""
self.draw_hbonds=""
if self.hbonds!=None:
for bond in self.hbonds.hbonds_for_drawing:
x = str((self.molecule.x_dim-self.molecule.molsize1)/2)
y = str((self.molecule.y_dim-self.molecule.molsize2)/2)
self.draw_hbonds ="<g id='"+str(bond[0])+"' class='HBonds' transform='translate("+x+","+y+")' x='"+x+"' y='"+y+"'>'"
atom = self.topology_data.universe.atoms[bond[0]-1] #zero-based index vs one-based index
residue = (atom.resname, str(atom.resid), atom.segid)
if bond[2] in ["N","O","H"]:
#backbone interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
else:
#sidechain interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' stroke-dasharray='5,5' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
self.draw_hbonds+="</g>" | [
"def",
"draw_hydrogen_bonds",
"(",
"self",
",",
"color",
"=",
"\"black\"",
")",
":",
"self",
".",
"draw_hbonds",
"=",
"\"\"",
"if",
"self",
".",
"hbonds",
"!=",
"None",
":",
"for",
"bond",
"in",
"self",
".",
"hbonds",
".",
"hbonds_for_drawing",
":",
"x",
"=",
"str",
"(",
"(",
"self",
".",
"molecule",
".",
"x_dim",
"-",
"self",
".",
"molecule",
".",
"molsize1",
")",
"/",
"2",
")",
"y",
"=",
"str",
"(",
"(",
"self",
".",
"molecule",
".",
"y_dim",
"-",
"self",
".",
"molecule",
".",
"molsize2",
")",
"/",
"2",
")",
"self",
".",
"draw_hbonds",
"=",
"\"<g id='\"",
"+",
"str",
"(",
"bond",
"[",
"0",
"]",
")",
"+",
"\"' class='HBonds' transform='translate(\"",
"+",
"x",
"+",
"\",\"",
"+",
"y",
"+",
"\")' x='\"",
"+",
"x",
"+",
"\"' y='\"",
"+",
"y",
"+",
"\"'>'\"",
"atom",
"=",
"self",
".",
"topology_data",
".",
"universe",
".",
"atoms",
"[",
"bond",
"[",
"0",
"]",
"-",
"1",
"]",
"#zero-based index vs one-based index",
"residue",
"=",
"(",
"atom",
".",
"resname",
",",
"str",
"(",
"atom",
".",
"resid",
")",
",",
"atom",
".",
"segid",
")",
"if",
"bond",
"[",
"2",
"]",
"in",
"[",
"\"N\"",
",",
"\"O\"",
",",
"\"H\"",
"]",
":",
"#backbone interactions",
"self",
".",
"draw_hbonds",
"=",
"self",
".",
"draw_hbonds",
"+",
"\"<line id='\"",
"+",
"str",
"(",
"bond",
"[",
"0",
"]",
")",
"+",
"\"' class='HBonds' x1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' x2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' style='stroke:white;stroke-width:15' />\"",
"self",
".",
"draw_hbonds",
"=",
"self",
".",
"draw_hbonds",
"+",
"\"<line id='\"",
"+",
"str",
"(",
"bond",
"[",
"0",
"]",
")",
"+",
"\"' class='HBonds' x1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' x2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' style='stroke:\"",
"+",
"color",
"+",
"\";stroke-width:4' />\"",
"else",
":",
"#sidechain interactions",
"self",
".",
"draw_hbonds",
"=",
"self",
".",
"draw_hbonds",
"+",
"\"<line id='\"",
"+",
"str",
"(",
"bond",
"[",
"0",
"]",
")",
"+",
"\"' class='HBonds' x1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' x2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' style='stroke:white;stroke-width:15' />\"",
"self",
".",
"draw_hbonds",
"=",
"self",
".",
"draw_hbonds",
"+",
"\"<line id='\"",
"+",
"str",
"(",
"bond",
"[",
"0",
"]",
")",
"+",
"\"' class='HBonds' stroke-dasharray='5,5' x1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y1='\"",
"+",
"str",
"(",
"int",
"(",
"self",
".",
"molecule",
".",
"nearest_points_coords",
"[",
"residue",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' x2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"0",
"]",
")",
")",
"+",
"\"' y2='\"",
"+",
"str",
"(",
"float",
"(",
"self",
".",
"molecule",
".",
"ligand_atom_coords_from_diagr",
"[",
"bond",
"[",
"1",
"]",
"]",
"[",
"1",
"]",
")",
")",
"+",
"\"' style='stroke:\"",
"+",
"color",
"+",
"\";stroke-width:4' />\"",
"self",
".",
"draw_hbonds",
"+=",
"\"</g>\""
]
| For each bond that has been determined to be important, a line gets drawn. | [
"For",
"each",
"bond",
"that",
"has",
"been",
"determined",
"to",
"be",
"important",
"a",
"line",
"gets",
"drawn",
"."
]
| python | train | 120.2 |
osrg/ryu | ryu/services/protocols/vrrp/api.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/vrrp/api.py#L21-L30 | def vrrp_config(app, interface, config):
"""create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure.
"""
config_request = vrrp_event.EventVRRPConfigRequest(interface, config)
config_request.sync = True
return app.send_request(config_request) | [
"def",
"vrrp_config",
"(",
"app",
",",
"interface",
",",
"config",
")",
":",
"config_request",
"=",
"vrrp_event",
".",
"EventVRRPConfigRequest",
"(",
"interface",
",",
"config",
")",
"config_request",
".",
"sync",
"=",
"True",
"return",
"app",
".",
"send_request",
"(",
"config_request",
")"
]
| create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure. | [
"create",
"an",
"instance",
".",
"returns",
"EventVRRPConfigReply",
"(",
"instance",
".",
"name",
"interface",
"config",
")",
"on",
"success",
".",
"returns",
"EventVRRPConfigReply",
"(",
"None",
"interface",
"config",
")",
"on",
"failure",
"."
]
| python | train | 37.2 |
samuelcolvin/pydantic | pydantic/schema.py | https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/schema.py#L408-L474 | def field_type_schema(
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['main.BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
    information such as title, etc. Also return additional schema definitions from sub-models.
"""
definitions = {}
ref_prefix = ref_prefix or default_prefix
if field.shape is Shape.LIST:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
return {'type': 'array', 'items': f_schema}, definitions
elif field.shape is Shape.SET:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
return {'type': 'array', 'uniqueItems': True, 'items': f_schema}, definitions
elif field.shape is Shape.MAPPING:
dict_schema: Dict[str, Any] = {'type': 'object'}
key_field = cast(Field, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
if regex:
# Dict keys have a regex pattern
# f_schema might be a schema or empty dict, add it either way
dict_schema['patternProperties'] = {regex.pattern: f_schema}
elif f_schema:
# The dict values are not simply Any, so they need a schema
dict_schema['additionalProperties'] = f_schema
return dict_schema, definitions
elif field.shape is Shape.TUPLE:
sub_schema = []
sub_fields = cast(List[Field], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions = field_type_schema(
sf, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(sf_definitions)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
return {'type': 'array', 'items': sub_schema}, definitions
else:
assert field.shape is Shape.SINGLETON, field.shape
f_schema, f_definitions = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
definitions.update(f_definitions)
return f_schema, definitions | [
"def",
"field_type_schema",
"(",
"field",
":",
"Field",
",",
"*",
",",
"by_alias",
":",
"bool",
",",
"model_name_map",
":",
"Dict",
"[",
"Type",
"[",
"'main.BaseModel'",
"]",
",",
"str",
"]",
",",
"schema_overrides",
":",
"bool",
"=",
"False",
",",
"ref_prefix",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
"->",
"Tuple",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"definitions",
"=",
"{",
"}",
"ref_prefix",
"=",
"ref_prefix",
"or",
"default_prefix",
"if",
"field",
".",
"shape",
"is",
"Shape",
".",
"LIST",
":",
"f_schema",
",",
"f_definitions",
"=",
"field_singleton_schema",
"(",
"field",
",",
"by_alias",
"=",
"by_alias",
",",
"model_name_map",
"=",
"model_name_map",
",",
"ref_prefix",
"=",
"ref_prefix",
")",
"definitions",
".",
"update",
"(",
"f_definitions",
")",
"return",
"{",
"'type'",
":",
"'array'",
",",
"'items'",
":",
"f_schema",
"}",
",",
"definitions",
"elif",
"field",
".",
"shape",
"is",
"Shape",
".",
"SET",
":",
"f_schema",
",",
"f_definitions",
"=",
"field_singleton_schema",
"(",
"field",
",",
"by_alias",
"=",
"by_alias",
",",
"model_name_map",
"=",
"model_name_map",
",",
"ref_prefix",
"=",
"ref_prefix",
")",
"definitions",
".",
"update",
"(",
"f_definitions",
")",
"return",
"{",
"'type'",
":",
"'array'",
",",
"'uniqueItems'",
":",
"True",
",",
"'items'",
":",
"f_schema",
"}",
",",
"definitions",
"elif",
"field",
".",
"shape",
"is",
"Shape",
".",
"MAPPING",
":",
"dict_schema",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"'type'",
":",
"'object'",
"}",
"key_field",
"=",
"cast",
"(",
"Field",
",",
"field",
".",
"key_field",
")",
"regex",
"=",
"getattr",
"(",
"key_field",
".",
"type_",
",",
"'regex'",
",",
"None",
")",
"f_schema",
",",
"f_definitions",
"=",
"field_singleton_schema",
"(",
"field",
",",
"by_alias",
"=",
"by_alias",
",",
"model_name_map",
"=",
"model_name_map",
",",
"ref_prefix",
"=",
"ref_prefix",
")",
"definitions",
".",
"update",
"(",
"f_definitions",
")",
"if",
"regex",
":",
"# Dict keys have a regex pattern",
"# f_schema might be a schema or empty dict, add it either way",
"dict_schema",
"[",
"'patternProperties'",
"]",
"=",
"{",
"regex",
".",
"pattern",
":",
"f_schema",
"}",
"elif",
"f_schema",
":",
"# The dict values are not simply Any, so they need a schema",
"dict_schema",
"[",
"'additionalProperties'",
"]",
"=",
"f_schema",
"return",
"dict_schema",
",",
"definitions",
"elif",
"field",
".",
"shape",
"is",
"Shape",
".",
"TUPLE",
":",
"sub_schema",
"=",
"[",
"]",
"sub_fields",
"=",
"cast",
"(",
"List",
"[",
"Field",
"]",
",",
"field",
".",
"sub_fields",
")",
"for",
"sf",
"in",
"sub_fields",
":",
"sf_schema",
",",
"sf_definitions",
"=",
"field_type_schema",
"(",
"sf",
",",
"by_alias",
"=",
"by_alias",
",",
"model_name_map",
"=",
"model_name_map",
",",
"ref_prefix",
"=",
"ref_prefix",
")",
"definitions",
".",
"update",
"(",
"sf_definitions",
")",
"sub_schema",
".",
"append",
"(",
"sf_schema",
")",
"if",
"len",
"(",
"sub_schema",
")",
"==",
"1",
":",
"sub_schema",
"=",
"sub_schema",
"[",
"0",
"]",
"# type: ignore",
"return",
"{",
"'type'",
":",
"'array'",
",",
"'items'",
":",
"sub_schema",
"}",
",",
"definitions",
"else",
":",
"assert",
"field",
".",
"shape",
"is",
"Shape",
".",
"SINGLETON",
",",
"field",
".",
"shape",
"f_schema",
",",
"f_definitions",
"=",
"field_singleton_schema",
"(",
"field",
",",
"by_alias",
"=",
"by_alias",
",",
"model_name_map",
"=",
"model_name_map",
",",
"schema_overrides",
"=",
"schema_overrides",
",",
"ref_prefix",
"=",
"ref_prefix",
",",
")",
"definitions",
".",
"update",
"(",
"f_definitions",
")",
"return",
"f_schema",
",",
"definitions"
]
| Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information such as title, etc. Also return additional schema definitions from sub-models. | [
"Used",
"by",
"field_schema",
"()",
"you",
"probably",
"should",
"be",
"using",
"that",
"function",
"."
]
| python | train | 43.223881 |
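A hedged illustration of the mapping branch in the record above: a `Dict[str, int]` field yields a schema with `additionalProperties` (the exact output shape may vary across pydantic versions):

from typing import Dict
from pydantic import BaseModel

class Inventory(BaseModel):
    counts: Dict[str, int]

# The 'counts' property is an object whose values are integers.
print(Inventory.schema()['properties']['counts'])
# expected shape: {'title': 'Counts', 'type': 'object', 'additionalProperties': {'type': 'integer'}}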
edx/edx-enterprise | integrated_channels/degreed/client.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/degreed/client.py#L175-L188 | def _delete(self, url, data, scope):
"""
Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
"""
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text | [
"def",
"_delete",
"(",
"self",
",",
"url",
",",
"data",
",",
"scope",
")",
":",
"self",
".",
"_create_session",
"(",
"scope",
")",
"response",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"url",
",",
"data",
"=",
"data",
")",
"return",
"response",
".",
"status_code",
",",
"response",
".",
"text"
]
| Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE` | [
"Make",
"a",
"DELETE",
"request",
"using",
"the",
"session",
"object",
"to",
"a",
"Degreed",
"endpoint",
"."
]
| python | valid | 41.071429 |
inveniosoftware/invenio-communities | invenio_communities/ext.py | https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/ext.py#L50-L57 | def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.cli.add_command(cmd)
app.extensions['invenio-communities'] = self
# Register the jinja do extension
app.jinja_env.add_extension('jinja2.ext.do')
self.register_signals(app) | [
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"self",
".",
"init_config",
"(",
"app",
")",
"app",
".",
"cli",
".",
"add_command",
"(",
"cmd",
")",
"app",
".",
"extensions",
"[",
"'invenio-communities'",
"]",
"=",
"self",
"# Register the jinja do extension",
"app",
".",
"jinja_env",
".",
"add_extension",
"(",
"'jinja2.ext.do'",
")",
"self",
".",
"register_signals",
"(",
"app",
")"
]
| Flask application initialization. | [
"Flask",
"application",
"initialization",
"."
]
| python | train | 38.875 |
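A minimal sketch of wiring the extension above into a Flask app, assuming the standard Invenio pattern where the extension constructor delegates to `init_app`:

from flask import Flask
from invenio_communities.ext import InvenioCommunities

app = Flask(__name__)
InvenioCommunities(app)  # registers the CLI command, jinja2.ext.do, and signals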
getgauge/gauge-python | getgauge/parser_parso.py | https://github.com/getgauge/gauge-python/blob/90f3547dcfd2d16d51f116cdd4e53527eeab1a57/getgauge/parser_parso.py#L59-L77 | def _step_decorator_args(self, decorator):
"""
Get the arguments passed to step decorators
converted to python objects.
"""
args = decorator.children[3:-2]
step = None
if len(args) == 1:
try:
step = ast.literal_eval(args[0].get_code())
except (ValueError, SyntaxError):
pass
if isinstance(step, six.string_types+(list,)):
return step
logging.error("Decorator step accepts either a string or a list of strings - %s:%d",
self.file_path, decorator.start_pos[0])
else:
logging.error("Decorator step accepts only one argument - %s:%d",
self.file_path, decorator.start_pos[0]) | [
"def",
"_step_decorator_args",
"(",
"self",
",",
"decorator",
")",
":",
"args",
"=",
"decorator",
".",
"children",
"[",
"3",
":",
"-",
"2",
"]",
"step",
"=",
"None",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"try",
":",
"step",
"=",
"ast",
".",
"literal_eval",
"(",
"args",
"[",
"0",
"]",
".",
"get_code",
"(",
")",
")",
"except",
"(",
"ValueError",
",",
"SyntaxError",
")",
":",
"pass",
"if",
"isinstance",
"(",
"step",
",",
"six",
".",
"string_types",
"+",
"(",
"list",
",",
")",
")",
":",
"return",
"step",
"logging",
".",
"error",
"(",
"\"Decorator step accepts either a string or a list of strings - %s:%d\"",
",",
"self",
".",
"file_path",
",",
"decorator",
".",
"start_pos",
"[",
"0",
"]",
")",
"else",
":",
"logging",
".",
"error",
"(",
"\"Decorator step accepts only one argument - %s:%d\"",
",",
"self",
".",
"file_path",
",",
"decorator",
".",
"start_pos",
"[",
"0",
"]",
")"
]
| Get the arguments passed to step decorators
converted to python objects. | [
"Get",
"the",
"arguments",
"passed",
"to",
"step",
"decorators",
"converted",
"to",
"python",
"objects",
"."
]
| python | test | 40.842105 |
idlesign/srptools | srptools/utils.py | https://github.com/idlesign/srptools/blob/eb08a27137d3216e41d63bbeafbac79f43881a6a/srptools/utils.py#L22-L34 | def hex_from(val):
"""Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
:rtype: bytes|str
"""
if isinstance(val, integer_types):
hex_str = '%x' % val
if len(hex_str) % 2:
hex_str = '0' + hex_str
return hex_str
return hexlify(val) | [
"def",
"hex_from",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"integer_types",
")",
":",
"hex_str",
"=",
"'%x'",
"%",
"val",
"if",
"len",
"(",
"hex_str",
")",
"%",
"2",
":",
"hex_str",
"=",
"'0'",
"+",
"hex_str",
"return",
"hex_str",
"return",
"hexlify",
"(",
"val",
")"
]
| Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
:rtype: bytes|str | [
"Returns",
"hex",
"string",
"representation",
"for",
"a",
"given",
"value",
"."
]
| python | train | 24.692308 |
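A small runnable sketch of `hex_from` above; the padding behavior follows directly from the source (odd-length hex strings get a leading zero):

from srptools.utils import hex_from

hex_from(255)          # 'ff'
hex_from(4095)         # '0fff' -- padded to an even number of digits
hex_from(b'\x01\x02')  # b'0102' via hexlify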
CartoDB/carto-python | carto/maps.py | https://github.com/CartoDB/carto-python/blob/f6ac3d17ed08e5bc3f99edd2bde4fb7dba3eee16/carto/maps.py#L138-L163 | def instantiate(self, params, auth=None):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
"""
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | [
"def",
"instantiate",
"(",
"self",
",",
"params",
",",
"auth",
"=",
"None",
")",
":",
"try",
":",
"endpoint",
"=",
"(",
"self",
".",
"Meta",
".",
"collection_endpoint",
"+",
"\"{template_id}\"",
")",
".",
"format",
"(",
"template_id",
"=",
"self",
".",
"template_id",
")",
"if",
"(",
"auth",
"is",
"not",
"None",
")",
":",
"endpoint",
"=",
"(",
"endpoint",
"+",
"\"?auth_token={auth_token}\"",
")",
".",
"format",
"(",
"auth_token",
"=",
"auth",
")",
"self",
".",
"send",
"(",
"endpoint",
",",
"\"POST\"",
",",
"json",
"=",
"params",
")",
"except",
"CartoRateLimitException",
"as",
"e",
":",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"CartoException",
"(",
"e",
")"
]
| Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException | [
"Allows",
"you",
"to",
"fetch",
"the",
"map",
"tiles",
"of",
"a",
"created",
"map"
]
| python | train | 32.461538 |
pulumi/pulumi | sdk/python/lib/pulumi/config.py | https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/config.py#L115-L127 | def require(self, key: str) -> str:
"""
Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist.
"""
v = self.get(key)
if v is None:
raise ConfigMissingError(self.full_key(key))
return v | [
"def",
"require",
"(",
"self",
",",
"key",
":",
"str",
")",
"->",
"str",
":",
"v",
"=",
"self",
".",
"get",
"(",
"key",
")",
"if",
"v",
"is",
"None",
":",
"raise",
"ConfigMissingError",
"(",
"self",
".",
"full_key",
"(",
"key",
")",
")",
"return",
"v"
]
| Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist. | [
"Returns",
"a",
"configuration",
"value",
"by",
"its",
"given",
"key",
".",
"If",
"it",
"doesn",
"t",
"exist",
"an",
"error",
"is",
"thrown",
"."
]
| python | train | 36 |
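A hedged sketch of `require` above, as it would appear inside a Pulumi program; the config key name is an assumption:

import pulumi

cfg = pulumi.Config()  # config namespace defaults to the project
db_password = cfg.require('dbPassword')  # raises ConfigMissingError if the key is unset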
bookieio/breadability | breadability/readable.py | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L446-L458 | def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
        # since we've not found a good candidate we should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment) | [
"def",
"_handle_no_candidates",
"(",
"self",
")",
":",
"# since we've not found a good candidate we're should help this",
"if",
"self",
".",
"dom",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"dom",
")",
":",
"dom",
"=",
"prep_article",
"(",
"self",
".",
"dom",
")",
"dom",
"=",
"build_base_document",
"(",
"dom",
",",
"self",
".",
"_return_fragment",
")",
"return",
"self",
".",
"_remove_orphans",
"(",
"dom",
".",
"get_element_by_id",
"(",
"\"readabilityBody\"",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"No document to use.\"",
")",
"return",
"build_error_document",
"(",
"self",
".",
"_return_fragment",
")"
]
| If we fail to find a good candidate we need to find something else. | [
"If",
"we",
"fail",
"to",
"find",
"a",
"good",
"candidate",
"we",
"need",
"to",
"find",
"something",
"else",
"."
]
| python | train | 44.076923 |
MillionIntegrals/vel | vel/commands/phase_train_command.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/commands/phase_train_command.py#L50-L106 | def run(self):
""" Run the command with supplied configuration """
device = self.model_config.torch_device()
learner = Learner(device, self.model_factory.instantiate())
# All callbacks useful for learning
callbacks = self.gather_callbacks()
# Metrics to track through this training
metrics = learner.metrics()
# Check if training was already started and potentially continue where we left off
training_info, hidden_state = self.resume_training(learner, callbacks, metrics)
# Prepare current training phase
current_phase_idx = self._select_phase_left_bound(training_info.start_epoch_idx)
current_phase = self.phases[current_phase_idx]
local_idx = training_info.start_epoch_idx - self.ladder[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
if training_info.start_epoch_idx > 0:
current_phase.restore(training_info, local_idx, learner.model, hidden_state)
training_info.on_train_begin()
for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.full_number_of_epochs + 1):
iteration_phase_idx = self._select_phase_right_bound(global_epoch_idx-1)
local_idx = global_epoch_idx - self.ladder[iteration_phase_idx]
# Phase preparations
while current_phase_idx != iteration_phase_idx:
current_phase.tear_down_phase(training_info, learner.model)
current_phase_idx += 1
current_phase = self.phases[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
# Create epoch info
epoch_info = current_phase.epoch_info(training_info, global_epoch_idx, local_idx)
# Execute learning
current_phase.execute_epoch(epoch_info, learner)
# Epoch checkpoint
self.storage.checkpoint(epoch_info, learner.model)
# Tear down the last phase
if current_phase is not None:
current_phase.tear_down_phase(training_info, learner.model)
training_info.on_train_end()
return training_info | [
"def",
"run",
"(",
"self",
")",
":",
"device",
"=",
"self",
".",
"model_config",
".",
"torch_device",
"(",
")",
"learner",
"=",
"Learner",
"(",
"device",
",",
"self",
".",
"model_factory",
".",
"instantiate",
"(",
")",
")",
"# All callbacks useful for learning",
"callbacks",
"=",
"self",
".",
"gather_callbacks",
"(",
")",
"# Metrics to track through this training",
"metrics",
"=",
"learner",
".",
"metrics",
"(",
")",
"# Check if training was already started and potentially continue where we left off",
"training_info",
",",
"hidden_state",
"=",
"self",
".",
"resume_training",
"(",
"learner",
",",
"callbacks",
",",
"metrics",
")",
"# Prepare current training phase",
"current_phase_idx",
"=",
"self",
".",
"_select_phase_left_bound",
"(",
"training_info",
".",
"start_epoch_idx",
")",
"current_phase",
"=",
"self",
".",
"phases",
"[",
"current_phase_idx",
"]",
"local_idx",
"=",
"training_info",
".",
"start_epoch_idx",
"-",
"self",
".",
"ladder",
"[",
"current_phase_idx",
"]",
"current_phase",
".",
"set_up_phase",
"(",
"training_info",
",",
"learner",
".",
"model",
",",
"self",
".",
"source",
")",
"print",
"(",
"current_phase",
".",
"banner",
"(",
")",
")",
"if",
"training_info",
".",
"start_epoch_idx",
">",
"0",
":",
"current_phase",
".",
"restore",
"(",
"training_info",
",",
"local_idx",
",",
"learner",
".",
"model",
",",
"hidden_state",
")",
"training_info",
".",
"on_train_begin",
"(",
")",
"for",
"global_epoch_idx",
"in",
"range",
"(",
"training_info",
".",
"start_epoch_idx",
"+",
"1",
",",
"self",
".",
"full_number_of_epochs",
"+",
"1",
")",
":",
"iteration_phase_idx",
"=",
"self",
".",
"_select_phase_right_bound",
"(",
"global_epoch_idx",
"-",
"1",
")",
"local_idx",
"=",
"global_epoch_idx",
"-",
"self",
".",
"ladder",
"[",
"iteration_phase_idx",
"]",
"# Phase preparations",
"while",
"current_phase_idx",
"!=",
"iteration_phase_idx",
":",
"current_phase",
".",
"tear_down_phase",
"(",
"training_info",
",",
"learner",
".",
"model",
")",
"current_phase_idx",
"+=",
"1",
"current_phase",
"=",
"self",
".",
"phases",
"[",
"current_phase_idx",
"]",
"current_phase",
".",
"set_up_phase",
"(",
"training_info",
",",
"learner",
".",
"model",
",",
"self",
".",
"source",
")",
"print",
"(",
"current_phase",
".",
"banner",
"(",
")",
")",
"# Create epoch info",
"epoch_info",
"=",
"current_phase",
".",
"epoch_info",
"(",
"training_info",
",",
"global_epoch_idx",
",",
"local_idx",
")",
"# Execute learning",
"current_phase",
".",
"execute_epoch",
"(",
"epoch_info",
",",
"learner",
")",
"# Epoch checkpoint",
"self",
".",
"storage",
".",
"checkpoint",
"(",
"epoch_info",
",",
"learner",
".",
"model",
")",
"# Tear down the last phase",
"if",
"current_phase",
"is",
"not",
"None",
":",
"current_phase",
".",
"tear_down_phase",
"(",
"training_info",
",",
"learner",
".",
"model",
")",
"training_info",
".",
"on_train_end",
"(",
")",
"return",
"training_info"
]
| Run the command with supplied configuration | [
"Run",
"the",
"command",
"with",
"supplied",
"configuration"
]
| python | train | 39.578947 |
matplotlib/cmocean | cmocean/tools.py | https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/tools.py#L73-L91 | def cmap(rgbin, N=256):
'''Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=10: The number of levels to be interpolated to.
'''
# rgb inputs here
if not isinstance(rgbin[0], _string_types):
# normalize to be out of 1 if out of 256 instead
if rgbin.max() > 1:
rgbin = rgbin/256.
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', rgbin, N=N)
return cmap | [
"def",
"cmap",
"(",
"rgbin",
",",
"N",
"=",
"256",
")",
":",
"# rgb inputs here",
"if",
"not",
"isinstance",
"(",
"rgbin",
"[",
"0",
"]",
",",
"_string_types",
")",
":",
"# normalize to be out of 1 if out of 256 instead",
"if",
"rgbin",
".",
"max",
"(",
")",
">",
"1",
":",
"rgbin",
"=",
"rgbin",
"/",
"256.",
"cmap",
"=",
"mpl",
".",
"colors",
".",
"LinearSegmentedColormap",
".",
"from_list",
"(",
"'mycmap'",
",",
"rgbin",
",",
"N",
"=",
"N",
")",
"return",
"cmap"
]
| Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to. | [
"Input",
"an",
"array",
"of",
"rgb",
"values",
"to",
"generate",
"a",
"colormap",
"."
]
| python | train | 34.526316 |
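A runnable sketch for `cmap` above: two RGB anchor colors interpolated into a 256-level colormap:

import numpy as np
from cmocean import tools

rgb = np.array([[0.0, 0.0, 0.5], [1.0, 1.0, 1.0]])  # dark blue -> white
cm = tools.cmap(rgb, N=256)  # a matplotlib LinearSegmentedColormap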
UAVCAN/pyuavcan | uavcan/driver/common.py | https://github.com/UAVCAN/pyuavcan/blob/a06a9975c1c0de4f1d469f05b29b374332968e2b/uavcan/driver/common.py#L64-L76 | def add_io_hook(self, hook):
"""
Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame.
"""
def proxy(*args):
hook(*args)
self._io_hooks.append(proxy)
return self.HookRemover(lambda: self._io_hooks.remove(proxy)) | [
"def",
"add_io_hook",
"(",
"self",
",",
"hook",
")",
":",
"def",
"proxy",
"(",
"*",
"args",
")",
":",
"hook",
"(",
"*",
"args",
")",
"self",
".",
"_io_hooks",
".",
"append",
"(",
"proxy",
")",
"return",
"self",
".",
"HookRemover",
"(",
"lambda",
":",
"self",
".",
"_io_hooks",
".",
"remove",
"(",
"proxy",
")",
")"
]
| Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame. | [
"Args",
":",
"hook",
":",
"This",
"hook",
"will",
"be",
"invoked",
"for",
"every",
"incoming",
"and",
"outgoing",
"CAN",
"frame",
".",
"Hook",
"arguments",
":",
"(",
"direction",
"frame",
")",
"See",
"FRAME_DIRECTION_",
"*",
"CANFrame",
"."
]
| python | train | 31.538462 |
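A hedged sketch for `add_io_hook` above; `driver` is an assumed, already-constructed driver instance, and the `remove()` attribute on the returned HookRemover is inferred from (not shown in) the source:

def log_frame(direction, frame):
    print(direction, frame)  # called for every incoming and outgoing CAN frame

remover = driver.add_io_hook(log_frame)
remover.remove()  # de-registers the proxy (assumed interface)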
pydata/xarray | xarray/core/dataarray.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataarray.py#L328-L358 | def to_dataset(self, dim=None, name=None):
"""Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset
"""
if dim is not None and dim not in self.dims:
warnings.warn('the order of the arguments on DataArray.to_dataset '
'has changed; you now need to supply ``name`` as '
'a keyword argument',
FutureWarning, stacklevel=2)
name = dim
dim = None
if dim is not None:
if name is not None:
raise TypeError('cannot supply both dim and name arguments')
return self._to_dataset_split(dim)
else:
return self._to_dataset_whole(name) | [
"def",
"to_dataset",
"(",
"self",
",",
"dim",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"dim",
"is",
"not",
"None",
"and",
"dim",
"not",
"in",
"self",
".",
"dims",
":",
"warnings",
".",
"warn",
"(",
"'the order of the arguments on DataArray.to_dataset '",
"'has changed; you now need to supply ``name`` as '",
"'a keyword argument'",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"name",
"=",
"dim",
"dim",
"=",
"None",
"if",
"dim",
"is",
"not",
"None",
":",
"if",
"name",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'cannot supply both dim and name arguments'",
")",
"return",
"self",
".",
"_to_dataset_split",
"(",
"dim",
")",
"else",
":",
"return",
"self",
".",
"_to_dataset_whole",
"(",
"name",
")"
]
| Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset | [
"Convert",
"a",
"DataArray",
"to",
"a",
"Dataset",
"."
]
| python | train | 36.709677 |
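A runnable sketch of both modes of `to_dataset` above:

import xarray as xr

da = xr.DataArray([[1, 2], [3, 4]], dims=('x', 'y'),
                  coords={'x': ['a', 'b']}, name='temperature')
ds_whole = da.to_dataset()         # one variable named 'temperature'
ds_split = da.to_dataset(dim='x')  # two variables 'a' and 'b', split along x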
juju/charm-helpers | charmhelpers/contrib/storage/linux/ceph.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L1485-L1504 | def is_broker_action_done(action, rid=None, unit=None):
"""Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False
"""
rdata = relation_get(rid, unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return False
rsp = CephBrokerRsp(broker_rsp)
unit_name = local_unit().partition('/')[2]
key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
kvstore = kv()
val = kvstore.get(key=key)
if val and val == rsp.request_id:
return True
return False | [
"def",
"is_broker_action_done",
"(",
"action",
",",
"rid",
"=",
"None",
",",
"unit",
"=",
"None",
")",
":",
"rdata",
"=",
"relation_get",
"(",
"rid",
",",
"unit",
")",
"or",
"{",
"}",
"broker_rsp",
"=",
"rdata",
".",
"get",
"(",
"get_broker_rsp_key",
"(",
")",
")",
"if",
"not",
"broker_rsp",
":",
"return",
"False",
"rsp",
"=",
"CephBrokerRsp",
"(",
"broker_rsp",
")",
"unit_name",
"=",
"local_unit",
"(",
")",
".",
"partition",
"(",
"'/'",
")",
"[",
"2",
"]",
"key",
"=",
"\"unit_{}_ceph_broker_action.{}\"",
".",
"format",
"(",
"unit_name",
",",
"action",
")",
"kvstore",
"=",
"kv",
"(",
")",
"val",
"=",
"kvstore",
".",
"get",
"(",
"key",
"=",
"key",
")",
"if",
"val",
"and",
"val",
"==",
"rsp",
".",
"request_id",
":",
"return",
"True",
"return",
"False"
]
| Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False | [
"Check",
"whether",
"broker",
"action",
"has",
"completed",
"yet",
"."
]
| python | train | 30.75 |
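A hedged sketch of `is_broker_action_done` above, as used inside a charm relation hook; `relation_id` and `remote_unit` are assumed to come from the hook context:

from charmhelpers.contrib.storage.linux.ceph import is_broker_action_done

if not is_broker_action_done('create_pool', rid=relation_id, unit=remote_unit):
    # broker request still pending; defer configuration
    pass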
stevelittlefish/littlefish | littlefish/util.py | https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/util.py#L342-L356 | def from_base62(s):
"""
Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer)
"""
result = 0
for c in s:
if c not in BASE62_MAP:
            raise Exception('Invalid base62 string: %s' % s)
result = result * 62 + BASE62_MAP.index(c)
return result | [
"def",
"from_base62",
"(",
"s",
")",
":",
"result",
"=",
"0",
"for",
"c",
"in",
"s",
":",
"if",
"c",
"not",
"in",
"BASE62_MAP",
":",
"raise",
"Exception",
"(",
"'Invalid base64 string: %s'",
"%",
"s",
")",
"result",
"=",
"result",
"*",
"62",
"+",
"BASE62_MAP",
".",
"index",
"(",
"c",
")",
"return",
"result"
]
| Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer) | [
"Convert",
"a",
"base62",
"String",
"back",
"into",
"a",
"number",
":",
"param",
"s",
":",
"The",
"base62",
"encoded",
"String",
":",
"return",
":",
"The",
"number",
"encoded",
"in",
"the",
"String",
"(",
"integer",
")"
]
| python | test | 24 |
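A self-contained sketch of the decoding loop in `from_base62` above, using an assumed alphabet (the real BASE62_MAP ordering may differ):

ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

def decode62(s):
    result = 0
    for c in s:
        result = result * 62 + ALPHABET.index(c)  # same accumulate step as the source
    return result

assert decode62('10') == 62  # '1' -> 1, '0' -> 0 under the assumed alphabet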
poppy-project/pypot | pypot/vrep/remoteApiBindings/vrep.py | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L529-L534 | def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode) | [
"def",
"simxAuxiliaryConsoleShow",
"(",
"clientID",
",",
"consoleHandle",
",",
"showState",
",",
"operationMode",
")",
":",
"return",
"c_AuxiliaryConsoleShow",
"(",
"clientID",
",",
"consoleHandle",
",",
"showState",
",",
"operationMode",
")"
]
| Please have a look at the function description/documentation in the V-REP user manual | [
"Please",
"have",
"a",
"look",
"at",
"the",
"function",
"description",
"/",
"documentation",
"in",
"the",
"V",
"-",
"REP",
"user",
"manual"
]
| python | train | 44.5 |
BlueBrain/NeuroM | neurom/fst/_neuronfunc.py | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuronfunc.py#L154-L177 | def trunk_angles(nrn, neurite_type=NeuriteType.all):
'''Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted from the y axis and anticlock-wise.
'''
vectors = trunk_vectors(nrn, neurite_type=neurite_type)
# In order to avoid the failure of the process in case the neurite_type does not exist
if not vectors.size:
return []
def _sort_angle(p1, p2):
"""Angle between p1-p2 to sort vectors"""
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang1 - ang2)
# Sorting angles according to x-y plane
order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1])
for i in vectors[:, 0:2]]))
ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]]
return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1])
for i, _ in enumerate(ordered_vectors)] | [
"def",
"trunk_angles",
"(",
"nrn",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"vectors",
"=",
"trunk_vectors",
"(",
"nrn",
",",
"neurite_type",
"=",
"neurite_type",
")",
"# In order to avoid the failure of the process in case the neurite_type does not exist",
"if",
"not",
"vectors",
".",
"size",
":",
"return",
"[",
"]",
"def",
"_sort_angle",
"(",
"p1",
",",
"p2",
")",
":",
"\"\"\"Angle between p1-p2 to sort vectors\"\"\"",
"ang1",
"=",
"np",
".",
"arctan2",
"(",
"*",
"p1",
"[",
":",
":",
"-",
"1",
"]",
")",
"ang2",
"=",
"np",
".",
"arctan2",
"(",
"*",
"p2",
"[",
":",
":",
"-",
"1",
"]",
")",
"return",
"(",
"ang1",
"-",
"ang2",
")",
"# Sorting angles according to x-y plane",
"order",
"=",
"np",
".",
"argsort",
"(",
"np",
".",
"array",
"(",
"[",
"_sort_angle",
"(",
"i",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"i",
")",
",",
"[",
"0",
",",
"1",
"]",
")",
"for",
"i",
"in",
"vectors",
"[",
":",
",",
"0",
":",
"2",
"]",
"]",
")",
")",
"ordered_vectors",
"=",
"vectors",
"[",
"order",
"]",
"[",
":",
",",
"[",
"COLS",
".",
"X",
",",
"COLS",
".",
"Y",
"]",
"]",
"return",
"[",
"morphmath",
".",
"angle_between_vectors",
"(",
"ordered_vectors",
"[",
"i",
"]",
",",
"ordered_vectors",
"[",
"i",
"-",
"1",
"]",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"ordered_vectors",
")",
"]"
]
| Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted from the y axis and anticlock-wise. | [
"Calculates",
"the",
"angles",
"between",
"all",
"the",
"trunks",
"of",
"the",
"neuron",
".",
"The",
"angles",
"are",
"defined",
"on",
"the",
"x",
"-",
"y",
"plane",
"and",
"the",
"trees",
"are",
"sorted",
"from",
"the",
"y",
"axis",
"and",
"anticlock",
"-",
"wise",
"."
]
| python | train | 40.5 |
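A hedged sketch of calling `trunk_angles` above; the SWC path is a placeholder:

import neurom as nm
from neurom import NeuriteType
from neurom.fst._neuronfunc import trunk_angles

nrn = nm.load_neuron('neuron.swc')  # placeholder path
angles = trunk_angles(nrn, neurite_type=NeuriteType.basal_dendrite)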
quasipedia/swaggery | swaggery/api.py | https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L197-L234 | def operations(*operations):
'''Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
    - It transfers onto itself the docstring and annotations from the decorated
    method, so as to be "transparent" with regard to introspection.
    - It transforms the method so as to make it a classmethod.
    - It invokes the method within a try-except block, so as to
    intercept and populate the Fail(<code>) conditions.'''
def decorator(method):
def wrapper(cls, request, start_response, **kwargs):
result_cache = []
try:
yield from method(cls, request, **kwargs)
except Respond as e:
# Inject messages as taken from signature
status = e.status
msg = utils.parse_return_annotation(method)[status]['message']
if status / 100 == 2: # All 2xx HTTP codes
e.description = msg
raise e
else: # HTTP Errors --> use werkzeug exceptions
raise CODES_TO_EXCEPTIONS[status](msg)
# Add operation-specific attributes to the method.
method.swagger_ops = operations
method.signature = inspect.signature(method)
method.source = inspect.getsource(method)
method.path_vars = utils.extract_pathvars(method)
# "Backport" the method introspective attributes to the wrapper.
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
wrapper.__annotations__ = method.__annotations__
wrapper.swagger_ops = method.swagger_ops
wrapper.signature = method.signature
wrapper.source = method.source
wrapper.path_vars = method.path_vars
return classmethod(wrapper)
return decorator | [
"def",
"operations",
"(",
"*",
"operations",
")",
":",
"def",
"decorator",
"(",
"method",
")",
":",
"def",
"wrapper",
"(",
"cls",
",",
"request",
",",
"start_response",
",",
"*",
"*",
"kwargs",
")",
":",
"result_cache",
"=",
"[",
"]",
"try",
":",
"yield",
"from",
"method",
"(",
"cls",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
"except",
"Respond",
"as",
"e",
":",
"# Inject messages as taken from signature",
"status",
"=",
"e",
".",
"status",
"msg",
"=",
"utils",
".",
"parse_return_annotation",
"(",
"method",
")",
"[",
"status",
"]",
"[",
"'message'",
"]",
"if",
"status",
"/",
"100",
"==",
"2",
":",
"# All 2xx HTTP codes",
"e",
".",
"description",
"=",
"msg",
"raise",
"e",
"else",
":",
"# HTTP Errors --> use werkzeug exceptions",
"raise",
"CODES_TO_EXCEPTIONS",
"[",
"status",
"]",
"(",
"msg",
")",
"# Add operation-specific attributes to the method.",
"method",
".",
"swagger_ops",
"=",
"operations",
"method",
".",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"method",
")",
"method",
".",
"source",
"=",
"inspect",
".",
"getsource",
"(",
"method",
")",
"method",
".",
"path_vars",
"=",
"utils",
".",
"extract_pathvars",
"(",
"method",
")",
"# \"Backport\" the method introspective attributes to the wrapper.",
"wrapper",
".",
"__name__",
"=",
"method",
".",
"__name__",
"wrapper",
".",
"__doc__",
"=",
"method",
".",
"__doc__",
"wrapper",
".",
"__annotations__",
"=",
"method",
".",
"__annotations__",
"wrapper",
".",
"swagger_ops",
"=",
"method",
".",
"swagger_ops",
"wrapper",
".",
"signature",
"=",
"method",
".",
"signature",
"wrapper",
".",
"source",
"=",
"method",
".",
"source",
"wrapper",
".",
"path_vars",
"=",
"method",
".",
"path_vars",
"return",
"classmethod",
"(",
"wrapper",
")",
"return",
"decorator"
]
| Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
- It transfers onto itself the docstring and annotations from the decorated
method, so as to be "transparent" with regard to introspection.
- It transforms the method so as to make it a classmethod.
- It invokes the method within a try-except block, so as to
intercept and populate the Fail(<code>) conditions. | [
"Decorator",
"for",
"marking",
"Resource",
"methods",
"as",
"HTTP",
"operations",
"."
]
| python | train | 48.052632 |
dfunckt/django-rules | rules/contrib/views.py | https://github.com/dfunckt/django-rules/blob/fcf3711122243c0c0c8124e9bb9bbb829f42ce1b/rules/contrib/views.py#L79-L145 | def permission_required(perm, fn=None, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
# Normalize to a list of permissions
if isinstance(perm, six.string_types):
perms = (perm,)
else:
perms = perm
# Get the object to check permissions against
if callable(fn):
obj = fn(request, *args, **kwargs)
else: # pragma: no cover
obj = fn
# Get the user
user = request.user
# Check for permissions and return a response
if not user.has_perms(perms, obj):
# User does not have a required permission
if raise_exception:
raise PermissionDenied()
else:
return _redirect_to_login(request, view_func.__name__,
login_url, redirect_field_name)
else:
# User has all required permissions -- allow the view to execute
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator | [
"def",
"permission_required",
"(",
"perm",
",",
"fn",
"=",
"None",
",",
"login_url",
"=",
"None",
",",
"raise_exception",
"=",
"False",
",",
"redirect_field_name",
"=",
"REDIRECT_FIELD_NAME",
")",
":",
"def",
"decorator",
"(",
"view_func",
")",
":",
"@",
"wraps",
"(",
"view_func",
",",
"assigned",
"=",
"available_attrs",
"(",
"view_func",
")",
")",
"def",
"_wrapped_view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Normalize to a list of permissions",
"if",
"isinstance",
"(",
"perm",
",",
"six",
".",
"string_types",
")",
":",
"perms",
"=",
"(",
"perm",
",",
")",
"else",
":",
"perms",
"=",
"perm",
"# Get the object to check permissions against",
"if",
"callable",
"(",
"fn",
")",
":",
"obj",
"=",
"fn",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"# pragma: no cover",
"obj",
"=",
"fn",
"# Get the user",
"user",
"=",
"request",
".",
"user",
"# Check for permissions and return a response",
"if",
"not",
"user",
".",
"has_perms",
"(",
"perms",
",",
"obj",
")",
":",
"# User does not have a required permission",
"if",
"raise_exception",
":",
"raise",
"PermissionDenied",
"(",
")",
"else",
":",
"return",
"_redirect_to_login",
"(",
"request",
",",
"view_func",
".",
"__name__",
",",
"login_url",
",",
"redirect_field_name",
")",
"else",
":",
"# User has all required permissions -- allow the view to execute",
"return",
"view_func",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapped_view",
"return",
"decorator"
]
| View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used. | [
"View",
"decorator",
"that",
"checks",
"for",
"the",
"given",
"permissions",
"before",
"allowing",
"the",
"view",
"to",
"execute",
".",
"Use",
"it",
"like",
"this",
"::"
]
| python | train | 40.462687 |
cltl/KafNafParserPy | KafNafParserPy/span_data.py | https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/span_data.py#L99-L110 | def get_id_head(self):
'''
Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target
'''
id_head = None
for target_node in self:
if target_node.is_head():
id_head = target_node.get_id()
break
return id_head | [
"def",
"get_id_head",
"(",
"self",
")",
":",
"id_head",
"=",
"None",
"for",
"target_node",
"in",
"self",
":",
"if",
"target_node",
".",
"is_head",
"(",
")",
":",
"id_head",
"=",
"target_node",
".",
"get_id",
"(",
")",
"break",
"return",
"id_head"
]
| Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target | [
"Returns",
"the",
"id",
"of",
"the",
"target",
"that",
"is",
"set",
"as",
"head"
]
| python | train | 30.333333 |
Skyscanner/pycfmodel | pycfmodel/model/cf_model.py | https://github.com/Skyscanner/pycfmodel/blob/e3da4db96f59c0a5dba06ae66ad25645775e5500/pycfmodel/model/cf_model.py#L32-L39 | def parse_parameters(self, parameters):
"""Parses and sets parameters in the model."""
self.parameters = []
for param_name, param_value in parameters.items():
p = Parameter(param_name, param_value)
if p:
self.parameters.append(p) | [
"def",
"parse_parameters",
"(",
"self",
",",
"parameters",
")",
":",
"self",
".",
"parameters",
"=",
"[",
"]",
"for",
"param_name",
",",
"param_value",
"in",
"parameters",
".",
"items",
"(",
")",
":",
"p",
"=",
"Parameter",
"(",
"param_name",
",",
"param_value",
")",
"if",
"p",
":",
"self",
".",
"parameters",
".",
"append",
"(",
"p",
")"
]
| Parses and sets parameters in the model. | [
"Parses",
"and",
"sets",
"parameters",
"in",
"the",
"model",
"."
]
| python | train | 35.875 |
jamieleshaw/lurklib | lurklib/channel.py | https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L139-L174 | def cmode(self, channel, modes=''):
"""
Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified return the modes of the channel.
"""
with self.lock:
self.is_in_channel(channel)
if not modes:
self.send('MODE %s' % channel)
modes = ''
mode_set_time = None
while self.readable():
msg = self._recv(rm_colon=True, \
expected_replies=('324', '329'))
if msg[0] == '324':
modes = msg[2].split()[1].replace('+', '', 1)
elif msg[0] == '329':
mode_set_time = self._m_time.localtime( \
int(msg[2].split()[1]))
return modes, mode_set_time
else:
self.send('MODE %s %s' % (channel, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',), \
ignore_unexpected_replies=True)
if msg[0]:
mode = msg[2]
self.parse_cmode_string(mode, msg[1])
if not self.hide_called_events:
self.stepback() | [
"def",
"cmode",
"(",
"self",
",",
"channel",
",",
"modes",
"=",
"''",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"is_in_channel",
"(",
"channel",
")",
"if",
"not",
"modes",
":",
"self",
".",
"send",
"(",
"'MODE %s'",
"%",
"channel",
")",
"modes",
"=",
"''",
"mode_set_time",
"=",
"None",
"while",
"self",
".",
"readable",
"(",
")",
":",
"msg",
"=",
"self",
".",
"_recv",
"(",
"rm_colon",
"=",
"True",
",",
"expected_replies",
"=",
"(",
"'324'",
",",
"'329'",
")",
")",
"if",
"msg",
"[",
"0",
"]",
"==",
"'324'",
":",
"modes",
"=",
"msg",
"[",
"2",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
".",
"replace",
"(",
"'+'",
",",
"''",
",",
"1",
")",
"elif",
"msg",
"[",
"0",
"]",
"==",
"'329'",
":",
"mode_set_time",
"=",
"self",
".",
"_m_time",
".",
"localtime",
"(",
"int",
"(",
"msg",
"[",
"2",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
")",
"return",
"modes",
",",
"mode_set_time",
"else",
":",
"self",
".",
"send",
"(",
"'MODE %s %s'",
"%",
"(",
"channel",
",",
"modes",
")",
")",
"if",
"self",
".",
"readable",
"(",
")",
":",
"msg",
"=",
"self",
".",
"_recv",
"(",
"expected_replies",
"=",
"(",
"'MODE'",
",",
")",
",",
"ignore_unexpected_replies",
"=",
"True",
")",
"if",
"msg",
"[",
"0",
"]",
":",
"mode",
"=",
"msg",
"[",
"2",
"]",
"self",
".",
"parse_cmode_string",
"(",
"mode",
",",
"msg",
"[",
"1",
"]",
")",
"if",
"not",
"self",
".",
"hide_called_events",
":",
"self",
".",
"stepback",
"(",
")"
]
| Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified return the modes of the channel. | [
"Sets",
"or",
"gets",
"the",
"channel",
"mode",
".",
"Required",
"arguments",
":",
"*",
"channel",
"-",
"Channel",
"to",
"set",
"/",
"get",
"modes",
"of",
".",
"Optional",
"arguments",
":",
"*",
"modes",
"=",
"-",
"Modes",
"to",
"set",
".",
"If",
"not",
"specified",
"return",
"the",
"modes",
"of",
"the",
"channel",
"."
]
| python | train | 41.222222 |
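
A hedged usage sketch for cmode: irc is assumed to be an already-connected lurklib client that has joined the channel (class name and connection setup omitted as assumptions):

# Query path: no modes argument, so the method returns the channel's
# current modes and the time they were set.
modes, mode_set_time = irc.cmode("#example")

# Set path: pass a mode string to apply it to the channel.
irc.cmode("#example", "+m")  # enable moderated mode
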
adamchainz/kwargs-only | kwargs_only.py | https://github.com/adamchainz/kwargs-only/blob/a75246283358696a6112af3baf5002fa023f5336/kwargs_only.py#L10-L36 | def kwargs_only(func):
"""
Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default):
"""
if hasattr(inspect, 'signature'): # pragma: no cover
# Python 3
signature = inspect.signature(func)
first_arg_name = list(signature.parameters.keys())[0]
else: # pragma: no cover
# Python 2
signature = inspect.getargspec(func)
first_arg_name = signature.args[0]
if first_arg_name in ('self', 'cls'):
allowable_args = 1
else:
allowable_args = 0
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > allowable_args:
raise TypeError("{} should only be called with keyword args".format(func.__name__))
return func(*args, **kwargs)
return wrapper | [
"def",
"kwargs_only",
"(",
"func",
")",
":",
"if",
"hasattr",
"(",
"inspect",
",",
"'signature'",
")",
":",
"# pragma: no cover",
"# Python 3",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"func",
")",
"first_arg_name",
"=",
"list",
"(",
"signature",
".",
"parameters",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"else",
":",
"# pragma: no cover",
"# Python 2",
"signature",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"first_arg_name",
"=",
"signature",
".",
"args",
"[",
"0",
"]",
"if",
"first_arg_name",
"in",
"(",
"'self'",
",",
"'cls'",
")",
":",
"allowable_args",
"=",
"1",
"else",
":",
"allowable_args",
"=",
"0",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"allowable_args",
":",
"raise",
"TypeError",
"(",
"\"{} should only be called with keyword args\"",
".",
"format",
"(",
"func",
".",
"__name__",
")",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
]
| Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default): | [
"Make",
"a",
"function",
"only",
"accept",
"keyword",
"arguments",
".",
"This",
"can",
"be",
"dropped",
"in",
"Python",
"3",
"in",
"lieu",
"of",
":",
"def",
"foo",
"(",
"*",
"bar",
"=",
"default",
")",
":"
]
| python | train | 30.185185 |
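
A short usage sketch for the decorator; greet() and its arguments are hypothetical, but the import path matches the module shown above:

from kwargs_only import kwargs_only

@kwargs_only
def greet(name="world", punctuation="!"):
    return "Hello, {}{}".format(name, punctuation)

print(greet(name="dev"))  # OK: keyword arguments are accepted
try:
    greet("dev")          # positional argument -> rejected
except TypeError as exc:
    print(exc)            # greet should only be called with keyword args
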
chapel-lang/sphinxcontrib-chapeldomain | sphinxcontrib/chapeldomain.py | https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L975-L989 | def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title) | [
"def",
"_make_module_refnode",
"(",
"self",
",",
"builder",
",",
"fromdocname",
",",
"name",
",",
"contnode",
")",
":",
"# Get additional info for modules.",
"docname",
",",
"synopsis",
",",
"platform",
",",
"deprecated",
"=",
"self",
".",
"data",
"[",
"'modules'",
"]",
"[",
"name",
"]",
"title",
"=",
"name",
"if",
"synopsis",
":",
"title",
"+=",
"': '",
"+",
"synopsis",
"if",
"deprecated",
":",
"title",
"+=",
"_",
"(",
"' (deprecated)'",
")",
"if",
"platform",
":",
"title",
"+=",
"' ('",
"+",
"platform",
"+",
"')'",
"return",
"make_refnode",
"(",
"builder",
",",
"fromdocname",
",",
"docname",
",",
"'module-'",
"+",
"name",
",",
"contnode",
",",
"title",
")"
]
| Helper function to generate new xref node based on
current environment. | [
"Helper",
"function",
"to",
"generate",
"new",
"xref",
"node",
"based",
"on",
"current",
"environment",
"."
]
| python | train | 40.4 |
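
A standalone sketch of just the title construction performed above, with hypothetical module metadata (the real method also resolves the docname and builds a Sphinx refnode, which needs a live build environment):

name = "ChapelIO"
synopsis, platform, deprecated = "basic input/output routines", "linux64", True

title = name
if synopsis:
    title += ": " + synopsis
if deprecated:
    title += " (deprecated)"
if platform:
    title += " (" + platform + ")"
print(title)  # ChapelIO: basic input/output routines (deprecated) (linux64)
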
glitchassassin/lackey | lackey/TemplateMatchers.py | https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L246-L253 | def _build_pyramid(self, image, levels):
""" Returns a list of reduced-size images, from smallest to original size """
pyramid = [image]
for l in range(levels-1):
if any(x < 20 for x in pyramid[-1].shape[:2]):
break
pyramid.append(cv2.pyrDown(pyramid[-1]))
return list(reversed(pyramid)) | [
"def",
"_build_pyramid",
"(",
"self",
",",
"image",
",",
"levels",
")",
":",
"pyramid",
"=",
"[",
"image",
"]",
"for",
"l",
"in",
"range",
"(",
"levels",
"-",
"1",
")",
":",
"if",
"any",
"(",
"x",
"<",
"20",
"for",
"x",
"in",
"pyramid",
"[",
"-",
"1",
"]",
".",
"shape",
"[",
":",
"2",
"]",
")",
":",
"break",
"pyramid",
".",
"append",
"(",
"cv2",
".",
"pyrDown",
"(",
"pyramid",
"[",
"-",
"1",
"]",
")",
")",
"return",
"list",
"(",
"reversed",
"(",
"pyramid",
")",
")"
]
| Returns a list of reduced-size images, from smallest to original size | [
"Returns",
"a",
"list",
"of",
"reduced",
"-",
"size",
"images",
"from",
"smallest",
"to",
"original",
"size"
]
| python | train | 44 |
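
A hedged usage sketch for _build_pyramid: OpenCV must be installed, and matcher is assumed to be an instance of the template-matcher class that defines the method:

import numpy as np

image = np.zeros((160, 120, 3), dtype=np.uint8)   # dummy BGR frame
pyramid = matcher._build_pyramid(image, levels=4)  # matcher: assumed instance
# Smallest image first; each level roughly halves width and height,
# stopping early if any dimension would drop below 20 pixels.
print([level.shape[:2] for level in pyramid])      # ... up to (160, 120) last
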
gwpy/gwpy | gwpy/cli/timeseries.py | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/timeseries.py#L65-L94 | def make_plot(self):
"""Generate the plot from time series and arguments
"""
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca(xscale='auto-gps')
# handle user specified plot labels
if self.args.legend:
nlegargs = len(self.args.legend[0])
else:
nlegargs = 0
if nlegargs > 0 and nlegargs != self.n_datasets:
warnings.warn('The number of legends specified must match '
'the number of time series'
' (channels * start times). '
'There are {:d} series and {:d} legends'.format(
len(self.timeseries), len(self.args.legend)))
nlegargs = 0 # don't use them
for i in range(0, self.n_datasets):
series = self.timeseries[i]
if nlegargs:
label = self.args.legend[0][i]
else:
label = series.channel.name
if self.usetex:
label = label_to_latex(label)
ax.plot(series, label=label)
return plot | [
"def",
"make_plot",
"(",
"self",
")",
":",
"plot",
"=",
"Plot",
"(",
"figsize",
"=",
"self",
".",
"figsize",
",",
"dpi",
"=",
"self",
".",
"dpi",
")",
"ax",
"=",
"plot",
".",
"gca",
"(",
"xscale",
"=",
"'auto-gps'",
")",
"# handle user specified plot labels",
"if",
"self",
".",
"args",
".",
"legend",
":",
"nlegargs",
"=",
"len",
"(",
"self",
".",
"args",
".",
"legend",
"[",
"0",
"]",
")",
"else",
":",
"nlegargs",
"=",
"0",
"if",
"nlegargs",
">",
"0",
"and",
"nlegargs",
"!=",
"self",
".",
"n_datasets",
":",
"warnings",
".",
"warn",
"(",
"'The number of legends specified must match '",
"'the number of time series'",
"' (channels * start times). '",
"'There are {:d} series and {:d} legends'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"timeseries",
")",
",",
"len",
"(",
"self",
".",
"args",
".",
"legend",
")",
")",
")",
"nlegargs",
"=",
"0",
"# don't use them",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"n_datasets",
")",
":",
"series",
"=",
"self",
".",
"timeseries",
"[",
"i",
"]",
"if",
"nlegargs",
":",
"label",
"=",
"self",
".",
"args",
".",
"legend",
"[",
"0",
"]",
"[",
"i",
"]",
"else",
":",
"label",
"=",
"series",
".",
"channel",
".",
"name",
"if",
"self",
".",
"usetex",
":",
"label",
"=",
"label_to_latex",
"(",
"label",
")",
"ax",
".",
"plot",
"(",
"series",
",",
"label",
"=",
"label",
")",
"return",
"plot"
]
| Generate the plot from time series and arguments | [
"Generate",
"the",
"plot",
"from",
"time",
"series",
"and",
"arguments"
]
| python | train | 37.033333 |
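
A standalone sketch of the legend-count check used in make_plot, with hypothetical values (the full method also needs loaded time series and a matplotlib backend):

import warnings

n_datasets = 3
legends = [["H1 strain", "L1 strain"]]  # one legend short of n_datasets

nlegargs = len(legends[0]) if legends else 0
if nlegargs > 0 and nlegargs != n_datasets:
    warnings.warn("The number of legends specified must match the number "
                  "of time series (channels * start times).")
    nlegargs = 0  # fall back to channel names for labels
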
openstack/networking-cisco | networking_cisco/plugins/cisco/device_manager/rpc/devices_cfgagent_rpc_cb.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devices_cfgagent_rpc_cb.py#L58-L80 | def update_hosting_device_status(self, context, host, status_info):
"""Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
}
"""
for status, hd_ids in six.iteritems(status_info):
# update hosting device entry in db to new status
hd_spec = {'hosting_device': {'status': status}}
for hd_id in hd_ids:
self._dmplugin.update_hosting_device(context, hd_id, hd_spec)
if status == const.HD_DEAD or status == const.HD_ERROR:
self._dmplugin.handle_non_responding_hosting_devices(
context, host, hd_ids) | [
"def",
"update_hosting_device_status",
"(",
"self",
",",
"context",
",",
"host",
",",
"status_info",
")",
":",
"for",
"status",
",",
"hd_ids",
"in",
"six",
".",
"iteritems",
"(",
"status_info",
")",
":",
"# update hosting device entry in db to new status",
"hd_spec",
"=",
"{",
"'hosting_device'",
":",
"{",
"'status'",
":",
"status",
"}",
"}",
"for",
"hd_id",
"in",
"hd_ids",
":",
"self",
".",
"_dmplugin",
".",
"update_hosting_device",
"(",
"context",
",",
"hd_id",
",",
"hd_spec",
")",
"if",
"status",
"==",
"const",
".",
"HD_DEAD",
"or",
"status",
"==",
"const",
".",
"HD_ERROR",
":",
"self",
".",
"_dmplugin",
".",
"handle_non_responding_hosting_devices",
"(",
"context",
",",
"host",
",",
"hd_ids",
")"
]
| Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
} | [
"Report",
"status",
"changes",
"for",
"hosting",
"devices",
"."
]
| python | train | 46.478261 |
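
A hedged payload sketch for the callback above: callbacks, context, and the const module import are assumed, and the ids are example values only:

# Hypothetical status payload; keys are the plugin's HD_* status constants.
status_info = {
    const.HD_ACTIVE: ["hd-uuid-1"],
    const.HD_DEAD: ["hd-uuid-2", "hd-uuid-3"],  # triggers non-responding handling
}
callbacks.update_hosting_device_status(context, "cfg-agent-host-1", status_info)
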
napalm-automation/napalm | napalm/nxos/nxos.py | https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/nxos/nxos.py#L637-L643 | def _send_command(self, command, raw_text=False):
"""
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
"""
return self.device.show(command, raw_text=raw_text) | [
"def",
"_send_command",
"(",
"self",
",",
"command",
",",
"raw_text",
"=",
"False",
")",
":",
"return",
"self",
".",
"device",
".",
"show",
"(",
"command",
",",
"raw_text",
"=",
"raw_text",
")"
]
| Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH. | [
"Wrapper",
"for",
"NX",
"-",
"API",
"show",
"method",
"."
]
| python | train | 32.142857 |
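
A hedged usage sketch for _send_command: device is assumed to be an already-opened napalm NX-OS driver instance (the method is internal to the driver):

structured = device._send_command("show version")                 # parsed output
raw = device._send_command("show running-config", raw_text=True)  # plain text
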
google-research/batch-ppo | agents/parts/normalize.py | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L110-L122 | def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum))) | [
"def",
"reset",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/reset'",
")",
":",
"return",
"tf",
".",
"group",
"(",
"self",
".",
"_count",
".",
"assign",
"(",
"0",
")",
",",
"self",
".",
"_mean",
".",
"assign",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_mean",
")",
")",
",",
"self",
".",
"_var_sum",
".",
"assign",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_var_sum",
")",
")",
")"
]
| Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation. | [
"Reset",
"the",
"estimates",
"of",
"mean",
"and",
"variance",
"."
]
| python | train | 27.307692 |
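
A hedged usage sketch for reset, in TensorFlow 1.x graph mode to match the tf.group/assign style above; normalizer is assumed to be an instance of the class defining the method:

import tensorflow as tf

reset_op = normalizer.reset()  # normalizer: assumed existing instance
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset_op)  # count, mean, and variance sums are now zeroed
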
elliterate/capybara.py | capybara/queries/current_path_query.py | https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/queries/current_path_query.py#L26-L54 | def resolves_for(self, session):
"""
Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves.
"""
if self.url:
self.actual_path = session.current_url
else:
result = urlparse(session.current_url)
if self.only_path:
self.actual_path = result.path
else:
request_uri = result.path
if result.query:
request_uri += "?{0}".format(result.query)
self.actual_path = request_uri
if isregex(self.expected_path):
return self.expected_path.search(self.actual_path)
else:
return normalize_url(self.actual_path) == normalize_url(self.expected_path) | [
"def",
"resolves_for",
"(",
"self",
",",
"session",
")",
":",
"if",
"self",
".",
"url",
":",
"self",
".",
"actual_path",
"=",
"session",
".",
"current_url",
"else",
":",
"result",
"=",
"urlparse",
"(",
"session",
".",
"current_url",
")",
"if",
"self",
".",
"only_path",
":",
"self",
".",
"actual_path",
"=",
"result",
".",
"path",
"else",
":",
"request_uri",
"=",
"result",
".",
"path",
"if",
"result",
".",
"query",
":",
"request_uri",
"+=",
"\"?{0}\"",
".",
"format",
"(",
"result",
".",
"query",
")",
"self",
".",
"actual_path",
"=",
"request_uri",
"if",
"isregex",
"(",
"self",
".",
"expected_path",
")",
":",
"return",
"self",
".",
"expected_path",
".",
"search",
"(",
"self",
".",
"actual_path",
")",
"else",
":",
"return",
"normalize_url",
"(",
"self",
".",
"actual_path",
")",
"==",
"normalize_url",
"(",
"self",
".",
"expected_path",
")"
]
| Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves. | [
"Returns",
"whether",
"this",
"query",
"resolves",
"for",
"the",
"given",
"session",
"."
]
| python | test | 30.551724 |
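
A hedged usage sketch for resolves_for: session is an assumed capybara Session, and the constructor arguments shown are assumptions rather than the verified signature:

query = CurrentPathQuery("/articles?page=2")  # hypothetical construction
if query.resolves_for(session):
    print("URL matched; actual path was", query.actual_path)
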
acutesoftware/virtual-AI-simulator | vais/simulator.py | https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/simulator.py#L107-L114 | def _move_agent(self, agent, direction, wrap_allowed=True):
"""
moves agent 'agent' in 'direction'
"""
x,y = agent.coords['x'], agent.coords['y']
print('moving agent ', agent.name, 'to x,y=', direction, 'wrap_allowed = ', wrap_allowed)
agent.coords['x'] = x + direction[0]
agent.coords['y'] = y + direction[1] | [
"def",
"_move_agent",
"(",
"self",
",",
"agent",
",",
"direction",
",",
"wrap_allowed",
"=",
"True",
")",
":",
"x",
",",
"y",
"=",
"agent",
".",
"coords",
"[",
"'x'",
"]",
",",
"agent",
".",
"coords",
"[",
"'y'",
"]",
"print",
"(",
"'moving agent '",
",",
"agent",
".",
"name",
",",
"'to x,y='",
",",
"direction",
",",
"'wrap_allowed = '",
",",
"wrap_allowed",
")",
"agent",
".",
"coords",
"[",
"'x'",
"]",
"=",
"x",
"+",
"direction",
"[",
"0",
"]",
"agent",
".",
"coords",
"[",
"'y'",
"]",
"=",
"y",
"+",
"direction",
"[",
"1",
"]"
]
| moves agent 'agent' in 'direction' | [
"moves",
"agent",
"agent",
"in",
"direction"
]
| python | train | 44.75 |
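
A hedged usage sketch for _move_agent: sim is an assumed simulator instance and agent an object carrying a coords dict; direction is a (dx, dy) offset added to the agent's position:

sim._move_agent(agent, (1, 0))                        # one step in +x
sim._move_agent(agent, (0, -1), wrap_allowed=False)   # one step in -y
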
juju/charm-helpers | charmhelpers/contrib/peerstorage/__init__.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/peerstorage/__init__.py#L192-L201 | def peer_store(key, value, relation_name='cluster'):
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name)) | [
"def",
"peer_store",
"(",
"key",
",",
"value",
",",
"relation_name",
"=",
"'cluster'",
")",
":",
"cluster_rels",
"=",
"relation_ids",
"(",
"relation_name",
")",
"if",
"len",
"(",
"cluster_rels",
")",
">",
"0",
":",
"cluster_rid",
"=",
"cluster_rels",
"[",
"0",
"]",
"relation_set",
"(",
"relation_id",
"=",
"cluster_rid",
",",
"relation_settings",
"=",
"{",
"key",
":",
"value",
"}",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unable to detect '",
"'peer relation {}'",
".",
"format",
"(",
"relation_name",
")",
")"
]
| Store the key/value pair on the named peer relation `relation_name`. | [
"Store",
"the",
"key",
"/",
"value",
"pair",
"on",
"the",
"named",
"peer",
"relation",
"relation_name",
"."
]
| python | train | 45.8 |
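
A usage sketch for peer_store inside a charm hook, where a "cluster" peer relation is assumed to already exist (otherwise the call raises ValueError); key/value pairs here are examples only:

from charmhelpers.contrib.peerstorage import peer_store

peer_store("leader-address", "10.0.0.7")                        # default relation
peer_store("dbsync-state", "complete", relation_name="cluster")
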