| code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars) |
---|---|---|
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result | Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped. | Below is the instruction that describes the task:
### Input:
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
### Response:
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result |
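The row above is pandas' internal `_drop_axis` helper behind the public `DataFrame.drop`. A minimal usage sketch of that public entry point, assuming a recent pandas release (the frame contents here are made up):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])

# Unique axis: labels are removed via Index.drop followed by reindex.
print(df.drop(columns=["b"]))

# A missing label raises KeyError by default; errors="ignore" suppresses it
# and simply keeps the frame unchanged.
print(df.drop(index=["z"], errors="ignore"))
```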
def read_file(filename):
"""Read package file as text to get name and version"""
# intentionally *not* adding an encoding option to open
# see here:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'graphql_compiler', filename), 'r') as f:
return f.read() | Read package file as text to get name and version | Below is the instruction that describes the task:
### Input:
Read package file as text to get name and version
### Response:
def read_file(filename):
"""Read package file as text to get name and version"""
# intentionally *not* adding an encoding option to open
# see here:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'graphql_compiler', filename), 'r') as f:
return f.read() |
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key) | Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid. | Below is the instruction that describes the task:
### Input:
Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
### Response:
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key) |
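A standalone sketch of the `blob_keys` normalization described in the comment above: a comma-separated string is accepted as shorthand for a list of keys. The limit value here is a hypothetical stand-in for `cls._MAX_BLOB_KEYS_COUNT`, and the real validator additionally resolves each key against the App Engine blobstore:

```python
MAX_BLOB_KEYS_COUNT = 246  # hypothetical stand-in for cls._MAX_BLOB_KEYS_COUNT

def normalize_blob_keys(blob_keys):
    # A single string may carry several comma-separated keys.
    if isinstance(blob_keys, str):
        blob_keys = blob_keys.split(",")
    if not blob_keys:
        raise ValueError("No 'blob_keys' specified for mapper input")
    if len(blob_keys) > MAX_BLOB_KEYS_COUNT:
        raise ValueError("Too many 'blob_keys' for mapper input")
    return blob_keys

print(normalize_blob_keys("key1,key2"))  # ['key1', 'key2']
```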
def _add_zone(self, zone, name='', status=Zone.CLEAR, expander=False):
"""
Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int
"""
if not zone in self._zones:
self._zones[zone] = Zone(zone=zone, name=name, status=None, expander=expander)
self._update_zone(zone, status=status) | Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int | Below is the instruction that describes the task:
### Input:
Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int
### Response:
def _add_zone(self, zone, name='', status=Zone.CLEAR, expander=False):
"""
Adds a zone to the internal zone list.
:param zone: zone number
:type zone: int
:param name: human readable zone name
:type name: string
:param status: zone status
:type status: int
"""
if not zone in self._zones:
self._zones[zone] = Zone(zone=zone, name=name, status=None, expander=expander)
self._update_zone(zone, status=status) |
def group_by_match(self, variant):
'''
Given a variant, split the PileupCollection based on whether it the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them.
'''
locus = to_locus(variant)
if len(variant.ref) != len(locus.positions):
logging.warning(
"Ref is length %d but locus has %d bases in variant: %s" %
(len(variant.ref), len(locus.positions), str(variant)))
alleles_dict = self.group_by_allele(locus)
single_base_loci = [
Locus.from_interbase_coordinates(locus.contig, position)
for position in locus.positions
]
empty_pileups = dict(
(locus, Pileup(locus=locus, elements=[]))
for locus in single_base_loci)
empty_collection = PileupCollection(pileups=empty_pileups, parent=self)
ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
other = alleles_dict
# TODO: consider end of read issues for insertions
return MatchingEvidence(ref, alt, other) | Given a variant, split the PileupCollection based on whether it the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them. | Below is the instruction that describes the task:
### Input:
Given a variant, split the PileupCollection based on whether it the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them.
### Response:
def group_by_match(self, variant):
'''
Given a variant, split the PileupCollection based on whether it the
data supports the reference allele, the alternate allele, or neither.
Parameters
----------
variant : Variant
The variant. Must have fields 'locus', 'ref', and 'alt'.
Returns
----------
A MatchingEvidence named tuple with fields (ref, alt, other),
each of which is a string -> PileupCollection dict mapping alleles
to the PileupCollection of evidence supporting them.
'''
locus = to_locus(variant)
if len(variant.ref) != len(locus.positions):
logging.warning(
"Ref is length %d but locus has %d bases in variant: %s" %
(len(variant.ref), len(locus.positions), str(variant)))
alleles_dict = self.group_by_allele(locus)
single_base_loci = [
Locus.from_interbase_coordinates(locus.contig, position)
for position in locus.positions
]
empty_pileups = dict(
(locus, Pileup(locus=locus, elements=[]))
for locus in single_base_loci)
empty_collection = PileupCollection(pileups=empty_pileups, parent=self)
ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
other = alleles_dict
# TODO: consider end of read issues for insertions
return MatchingEvidence(ref, alt, other) |
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal | *Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8 | Below is the instruction that describes the task:
### Input:
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
### Response:
def dec_decimal_to_sexegesimal(
self,
dec,
delimiter=":"):
"""
*Convert a declination between decimal degrees and sexegesimal.*
Precision should be respected.
**Key Arguments:**
- ``dec`` -- DEC in decimal degrees. Will try and convert to float before performing calculation.
- ``delimiter`` -- how to delimit the RA units. Default *:*
**Return:**
- ``sexegesimal`` -- ra in sexegesimal units
**Usage:**
.. todo::
- replace dec_to_sex in dryxPython in all code
.. code-block:: python
from astrocalc.coords import unit_conversion
converter = unit_conversion(
log=log
)
dec = converter.dec_decimal_to_sexegesimal(
dec="-3.454676456",
delimiter=":"
)
print dec
# OUT: -03:27:16.8
"""
self.log.info('starting the ``dec_decimal_to_sexegesimal`` method')
import math
# CONVERT DEC TO FLOAT
try:
self.log.debug("attempting to convert RA to float")
dec = float(dec)
except Exception, e:
self.log.error(
"could not convert RA to float - failed with this error: %s " % (str(e),))
return -1
# COMPLAIN IF DEC NOT BETWEEN -90 - 90
if dec > -90. and dec < 90.:
pass
else:
self.log.error(
"DEC must be between -90 - 90 degrees")
return -1
if (dec >= 0):
hemisphere = '+'
else:
hemisphere = '-'
dec *= -1
# PRECISION TEST
# 1s = .000277778 DEGREE
# THEREFORE REPORT SECONDS TO A PRECISION = INPUT PRECISION - 4
decimalLen = len(repr(dec).split(".")[-1])
precision = decimalLen - 4
dec_deg = int(dec)
dec_mm = int((dec - dec_deg) * 60)
dec_ss = int(((dec - dec_deg) * 60 - dec_mm) * 60)
dec_f = (((dec - dec_deg) * 60 - dec_mm) * 60) - dec_ss
# SET PRECISION
dec_f = repr(dec_f)[2:]
dec_f = dec_f[:precision]
if len(dec_f):
dec_f = "." + dec_f
if precision < 0:
dec_f = ""
sexegesimal = hemisphere + '%02d' % dec_deg + delimiter + \
'%02d' % dec_mm + delimiter + '%02d' % dec_ss + dec_f
self.log.info('completed the ``dec_decimal_to_sexegesimal`` method')
return sexegesimal |
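The code above is Python 2 (`except Exception, e:`, `print dec`). A compact Python 3 sketch of the same degrees-to-sexagesimal split, with the precision handling simplified to one decimal place of arcseconds:

```python
def dec_to_sexagesimal(dec, delimiter=":"):
    sign = "+" if float(dec) >= 0 else "-"
    dec = abs(float(dec))
    degrees = int(dec)
    minutes = int((dec - degrees) * 60)
    seconds = dec * 3600 - degrees * 3600 - minutes * 60
    return "%s%02d%s%02d%s%04.1f" % (sign, degrees, delimiter, minutes, delimiter, seconds)

print(dec_to_sexagesimal("-3.454676456"))  # -03:27:16.8
```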
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):
"""Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective.
"""
def inner(T):
arr = T("input")
# print shp
if mask is None:
mask_ = np.ones(shp)
mask_[:, w:-w, w:-w] = 0
else:
mask_ = mask
blur = _tf_blur(arr, w=5)
diffs = (blur-arr)**2
diffs += 0.8*(arr-C)**2
return -tf.reduce_sum(diffs*mask_)
return inner | Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective. | Below is the instruction that describes the task:
### Input:
Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective.
### Response:
def penalize_boundary_complexity(shp, w=20, mask=None, C=0.5):
"""Encourage the boundaries of an image to have less variation and of color C.
Args:
shp: shape of T("input") because this may not be known.
w: width of boundary to penalize. Ignored if mask is set.
mask: mask describing what area should be penalized.
Returns:
Objective.
"""
def inner(T):
arr = T("input")
# print shp
if mask is None:
mask_ = np.ones(shp)
mask_[:, w:-w, w:-w] = 0
else:
mask_ = mask
blur = _tf_blur(arr, w=5)
diffs = (blur-arr)**2
diffs += 0.8*(arr-C)**2
return -tf.reduce_sum(diffs*mask_)
return inner |
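A NumPy-only sketch of the default mask built above when `mask is None`: ones on a border of width `w`, zeros in the interior, so only boundary pixels contribute to the penalty. The layout is assumed to be batch, height, width, channels, matching the slicing above; the shape itself is made up:

```python
import numpy as np

shp = (1, 8, 8, 3)  # hypothetical (batch, height, width, channels)
w = 2
mask = np.ones(shp)
mask[:, w:-w, w:-w] = 0  # interior pixels are excluded from the penalty

print(mask[0, :, :, 0].astype(int))
```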
def _calc_sizes(self, cnv_file, items):
"""Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget).
"""
bp_per_bin = 100000 # same target as CNVkit
range_map = {"target": (100, 250), "antitarget": (10000, 1000000)}
target_bps = []
anti_bps = []
checked_beds = set([])
for data in items:
region_bed = tz.get_in(["depth", "variant_regions", "regions"], data)
if region_bed and region_bed not in checked_beds:
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file):
if r.stop - r.start > range_map["target"][0]:
target_bps.append(float(r.name))
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file, v=True):
if r.stop - r.start > range_map["target"][1]:
anti_bps.append(float(r.name))
checked_beds.add(region_bed)
def scale_in_boundary(raw, round_interval, range_targets):
min_val, max_val = range_targets
out = int(math.ceil(raw / float(round_interval)) * round_interval)
if out > max_val:
return max_val
elif out < min_val:
return min_val
else:
return out
if target_bps and np.median(target_bps) > 0:
raw_target_bin = bp_per_bin / float(np.median(target_bps))
target_bin = scale_in_boundary(raw_target_bin, 50, range_map["target"])
else:
target_bin = range_map["target"][1]
if anti_bps and np.median(anti_bps) > 0:
raw_anti_bin = bp_per_bin / float(np.median(anti_bps))
anti_bin = scale_in_boundary(raw_anti_bin, 10000, range_map["antitarget"])
else:
anti_bin = range_map["antitarget"][1]
return target_bin, anti_bin | Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget). | Below is the instruction that describes the task:
### Input:
Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget).
### Response:
def _calc_sizes(self, cnv_file, items):
"""Retrieve target and antitarget bin sizes based on depth.
Similar to CNVkit's do_autobin but tries to have a standard set of
ranges (50bp intervals for target and 10kb intervals for antitarget).
"""
bp_per_bin = 100000 # same target as CNVkit
range_map = {"target": (100, 250), "antitarget": (10000, 1000000)}
target_bps = []
anti_bps = []
checked_beds = set([])
for data in items:
region_bed = tz.get_in(["depth", "variant_regions", "regions"], data)
if region_bed and region_bed not in checked_beds:
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file):
if r.stop - r.start > range_map["target"][0]:
target_bps.append(float(r.name))
with utils.open_gzipsafe(region_bed) as in_handle:
for r in pybedtools.BedTool(in_handle).intersect(cnv_file, v=True):
if r.stop - r.start > range_map["target"][1]:
anti_bps.append(float(r.name))
checked_beds.add(region_bed)
def scale_in_boundary(raw, round_interval, range_targets):
min_val, max_val = range_targets
out = int(math.ceil(raw / float(round_interval)) * round_interval)
if out > max_val:
return max_val
elif out < min_val:
return min_val
else:
return out
if target_bps and np.median(target_bps) > 0:
raw_target_bin = bp_per_bin / float(np.median(target_bps))
target_bin = scale_in_boundary(raw_target_bin, 50, range_map["target"])
else:
target_bin = range_map["target"][1]
if anti_bps and np.median(anti_bps) > 0:
raw_anti_bin = bp_per_bin / float(np.median(anti_bps))
anti_bin = scale_in_boundary(raw_anti_bin, 10000, range_map["antitarget"])
else:
anti_bin = range_map["antitarget"][1]
return target_bin, anti_bin |
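A worked example of the bin-size arithmetic above, using a made-up median per-region depth (the real value comes from the variant-regions BED annotations): the raw bin width `bp_per_bin / median_depth` is rounded up to the interval and clamped to the allowed range.

```python
import math

def scale_in_boundary(raw, round_interval, range_targets):
    min_val, max_val = range_targets
    out = int(math.ceil(raw / float(round_interval)) * round_interval)
    return min(max(out, min_val), max_val)

bp_per_bin = 100000
median_target_depth = 612.0                          # hypothetical median depth
raw_target_bin = bp_per_bin / median_target_depth    # ~163.4
print(scale_in_boundary(raw_target_bin, 50, (100, 250)))   # -> 200
```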
def to_timestamp(val):
"""
Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value.
"""
# If we're given a number, give it right back - it's already a timestamp.
if isinstance(val, numbers.Number):
return val
elif isinstance(val, six.string_types):
dt = _parse_datetime_string(val)
else:
dt = val
return time.mktime(dt.timetuple()) | Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value. | Below is the instruction that describes the task:
### Input:
Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value.
### Response:
def to_timestamp(val):
"""
Takes a value that is either a Python date, datetime, or a string
representation of a date/datetime value. Returns a standard Unix timestamp
corresponding to that value.
"""
# If we're given a number, give it right back - it's already a timestamp.
if isinstance(val, numbers.Number):
return val
elif isinstance(val, six.string_types):
dt = _parse_datetime_string(val)
else:
dt = val
return time.mktime(dt.timetuple()) |
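A behavior sketch for the three accepted input kinds. The string branch in the real function goes through the module's private `_parse_datetime_string` helper; `datetime.fromisoformat` merely stands in for it here:

```python
import time
from datetime import datetime

def to_timestamp_sketch(val):
    if isinstance(val, (int, float)):
        return val                              # already a Unix timestamp
    if isinstance(val, str):
        val = datetime.fromisoformat(val)       # stand-in for _parse_datetime_string
    return time.mktime(val.timetuple())         # local-time conversion, as above

print(to_timestamp_sketch(1500000000))
print(to_timestamp_sketch(datetime(2020, 1, 1)))
print(to_timestamp_sketch("2020-01-01 12:00:00"))
```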
def setValues(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data):
"""
Set setValues
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array
"""
nxGHalf = nxG/2.
nyGHalf = nyG/2.
nxGQuart = nxGHalf/2.
nyGQuart = nyGHalf/2.
for i in range(data.shape[0]):
iG = iBeg + i
di = iG - nxG
for j in range(data.shape[1]):
jG = jBeg + j
dj = jG - 0.8*nyG
data[i, j] = numpy.floor(1.9*numpy.exp(-di**2/nxGHalf**2 - dj**2/nyGHalf**2)) | Set setValues
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array | Below is the instruction that describes the task:
### Input:
Set setValues
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array
### Response:
def setValues(nxG, nyG, iBeg, iEnd, jBeg, jEnd, data):
"""
Set setValues
@param nxG number of global cells in x
@param nyG number of global cells in y
@param iBeg global starting index in x
@param iEnd global ending index in x
@param jBeg global starting index in y
@param jEnd global ending index in y
@param data local array
"""
nxGHalf = nxG/2.
nyGHalf = nyG/2.
nxGQuart = nxGHalf/2.
nyGQuart = nyGHalf/2.
for i in range(data.shape[0]):
iG = iBeg + i
di = iG - nxG
for j in range(data.shape[1]):
jG = jBeg + j
dj = jG - 0.8*nyG
data[i, j] = numpy.floor(1.9*numpy.exp(-di**2/nxGHalf**2 - dj**2/nyGHalf**2)) |
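A hypothetical call, filling a 4x4 local block of a 16x16 global grid; only the shape of `data` and the global offsets matter, and the specific sizes here are made up:

```python
import numpy

data = numpy.zeros((4, 4))
setValues(nxG=16, nyG=16, iBeg=4, iEnd=8, jBeg=8, jEnd=12, data=data)
print(data)
```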
def deconstruct(self, including_private: bool=False) -> bytes:
"""Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN.
"""
data = self._deconstruct_v1(including_private=including_private)
return compress_datablob(DATA_BLOB_MAGIC, 1, data) | Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN. | Below is the instruction that describes the task:
### Input:
Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN.
### Response:
def deconstruct(self, including_private: bool=False) -> bytes:
"""Return state of this FinTSClient instance as an opaque datablob. You should not
use this object after calling this method.
Information about the connection is implicitly retrieved from the bank and
cached in the FinTSClient. This includes: system identifier, bank parameter
data, user parameter data. It's not strictly required to retain this information
across sessions, but beneficial. If possible, an API user SHOULD use this method
to serialize the client instance before destroying it, and provide the serialized
data next time an instance is constructed.
Parameter `including_private` should be set to True, if the storage is sufficiently
secure (with regards to confidentiality) to include private data, specifically,
account numbers and names. Most often this is the case.
Note: No connection information is stored in the datablob, neither is the PIN.
"""
data = self._deconstruct_v1(including_private=including_private)
return compress_datablob(DATA_BLOB_MAGIC, 1, data) |
def metric_delete(self, project, metric_name):
"""API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
"""
path = "projects/%s/metrics/%s" % (project, metric_name)
self._gapic_api.delete_log_metric(path) | API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric | Below is the instruction that describes the task:
### Input:
API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
### Response:
def metric_delete(self, project, metric_name):
"""API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
"""
path = "projects/%s/metrics/%s" % (project, metric_name)
self._gapic_api.delete_log_metric(path) |
def export_default_probes(path, module_name = '', raise_errors = False):
"""
NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files
"""
raise NotImplementedError
import b26_toolkit.b26_toolkit.instruments as instruments
from pylabcontrol.core import Probe
for name, obj in inspect.getmembers(instruments):
if inspect.isclass(obj):
try:
instrument = obj()
print(('--- created ', obj.__name__, ' -- '))
for probe_name, probe_info in instrument._PROBES.items():
probe = Probe(instrument, probe_name, info = probe_info)
filename = os.path.join(path, '{:s}.b26'.format(instrument.name))
probe.save(filename)
except:
print(('failed to create probe file for: {:s}'.format(obj.__name__)))
print(('failed to create probe file for: {:s}'.format(obj.__name__))) | NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files | Below is the instruction that describes the task:
### Input:
NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files
### Response:
def export_default_probes(path, module_name = '', raise_errors = False):
"""
NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files
"""
raise NotImplementedError
import b26_toolkit.b26_toolkit.instruments as instruments
from pylabcontrol.core import Probe
for name, obj in inspect.getmembers(instruments):
if inspect.isclass(obj):
try:
instrument = obj()
print(('--- created ', obj.__name__, ' -- '))
for probe_name, probe_info in instrument._PROBES.items():
probe = Probe(instrument, probe_name, info = probe_info)
filename = os.path.join(path, '{:s}.b26'.format(instrument.name))
probe.save(filename)
except:
print(('failed to create probe file for: {:s}'.format(obj.__name__)))
print(('failed to create probe file for: {:s}'.format(obj.__name__))) |
def init_parsecmdline(argv=[]):
"""
Parse arguments from the command line
:param argv: list of arguments
"""
# main argument parser
parser = argparse.ArgumentParser(prog=PKG_NAME)
# --version
parser.add_argument('--version', action='version', version=version)
# -c, --config <file_name>
parser.add_argument("-c", "--config",
action="store",
dest="config_file", default=config.CONF_DEFAULT_FILE,
help="specify configuration file to use")
# --dry-run
parser.add_argument("-d", "--dry-run",
action="store_true", dest="dry_run", default=False,
help="don't actually do anything")
# --quiet
parser.add_argument("-q", "--quiet",
action="store_true", dest="log_quiet", default=False,
help="quiet output")
# --ll <level>
# logging level
parser.add_argument("--ll", "--log-level",
action="store", type=int,
dest="log_lvl", default=log.LOG_LVL_DEFAULT,
help="set logging level")
# -l, --log-file
parser.add_argument("-l", "--log-file",
action="store",
dest="log_file", default=log.LOG_FILE_DEFAULT,
help="set log file")
# Absorb the options
options = parser.parse_args(argv)
# Set whether we are going to perform a dry run
global _opt
_opt["dry_run"] = options.dry_run
# Initiate the log level
log.init(threshold_lvl=options.log_lvl,
quiet_stdout=options.log_quiet, log_file=options.log_file)
#
# Print the splash
#
_splash()
# Merge configuration with a JSON file
config_file = os.path.abspath(options.config_file)
log.msg("Attempting to use configuration file '{config_file}'"
.format(config_file=config_file))
try:
config.set_from_file(config_file)
except FileNotFoundError:
raise FileNotFoundError("Configuration file '{config_file}' not found!"
.format(config_file=config_file)) | Parse arguments from the command line
:param argv: list of arguments | Below is the instruction that describes the task:
### Input:
Parse arguments from the command line
:param argv: list of arguments
### Response:
def init_parsecmdline(argv=[]):
"""
Parse arguments from the command line
:param argv: list of arguments
"""
# main argument parser
parser = argparse.ArgumentParser(prog=PKG_NAME)
# --version
parser.add_argument('--version', action='version', version=version)
# -c, --config <file_name>
parser.add_argument("-c", "--config",
action="store",
dest="config_file", default=config.CONF_DEFAULT_FILE,
help="specify configuration file to use")
# --dry-run
parser.add_argument("-d", "--dry-run",
action="store_true", dest="dry_run", default=False,
help="don't actually do anything")
# --quiet
parser.add_argument("-q", "--quiet",
action="store_true", dest="log_quiet", default=False,
help="quiet output")
# --ll <level>
# logging level
parser.add_argument("--ll", "--log-level",
action="store", type=int,
dest="log_lvl", default=log.LOG_LVL_DEFAULT,
help="set logging level")
# -l, --log-file
parser.add_argument("-l", "--log-file",
action="store",
dest="log_file", default=log.LOG_FILE_DEFAULT,
help="set log file")
# Absorb the options
options = parser.parse_args(argv)
# Set whether we are going to perform a dry run
global _opt
_opt["dry_run"] = options.dry_run
# Initiate the log level
log.init(threshold_lvl=options.log_lvl,
quiet_stdout=options.log_quiet, log_file=options.log_file)
#
# Print the splash
#
_splash()
# Merge configuration with a JSON file
config_file = os.path.abspath(options.config_file)
log.msg("Attempting to use configuration file '{config_file}'"
.format(config_file=config_file))
try:
config.set_from_file(config_file)
except FileNotFoundError:
raise FileNotFoundError("Configuration file '{config_file}' not found!"
.format(config_file=config_file)) |
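A hypothetical invocation, mirroring how a console entry point might forward `sys.argv[1:]`; the config path and log level here are made-up values:

```python
init_parsecmdline(["-c", "/etc/myapp/conf.json", "--dry-run", "--ll", "10"])
# typically called as: init_parsecmdline(sys.argv[1:])
```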
def comment_magic(source, language='python', global_escape_flag=True):
"""Escape Jupyter magics with '# '"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (next_is_magic or is_magic(line, language, global_escape_flag)):
source[pos] = _COMMENT[language] + ' ' + line
next_is_magic = language == 'python' and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source | Escape Jupyter magics with '# ' | Below is the instruction that describes the task:
### Input:
Escape Jupyter magics with '# '
### Response:
def comment_magic(source, language='python', global_escape_flag=True):
"""Escape Jupyter magics with '# '"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (next_is_magic or is_magic(line, language, global_escape_flag)):
source[pos] = _COMMENT[language] + ' ' + line
next_is_magic = language == 'python' and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source |
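A hedged usage sketch: cell source is passed as a list of lines, and lines detected as magics come back prefixed with the language's comment marker (`#` for Python), while ordinary code is left untouched:

```python
source = ["%matplotlib inline", "x = 1"]
print(comment_magic(source))
# expected, assuming the first line is detected as a magic:
# ['# %matplotlib inline', 'x = 1']
```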
def put(self, items, indexes=True):
'''Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
'''
actions = []
for cid, fc in items:
# TODO: If we store features in a columnar order, then we
# could tell ES to index the feature values directly. ---AG
# (But is problematic because we want to preserve the ability
# to selectively index FCs. So we'd probably need two distinct
# doc types.)
idxs = defaultdict(list)
if indexes:
for fname in self.indexed_features:
if fname in fc:
idxs[fname_to_idx_name(fname)].extend(fc[fname])
for fname in self.fulltext_indexed_features:
if fname not in fc:
continue
if isinstance(fc[fname], basestring):
idxs[fname_to_full_idx_name(fname)] = fc[fname]
else:
idxs[fname_to_full_idx_name(fname)].extend(fc[fname])
actions.append({
'_index': self.index,
'_type': self.type,
'_id': eid(cid),
'_op_type': 'index',
'_source': dict(idxs, **{
'fc': self.fc_to_dict(fc),
}),
})
bulk(self.conn, actions, timeout=60, request_timeout=60) | Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed. | Below is the instruction that describes the task:
### Input:
Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
### Response:
def put(self, items, indexes=True):
'''Adds feature collections to the store.
This efficiently adds multiple FCs to the store. The iterable
of ``items`` given should yield tuples of ``(content_id, FC)``.
:param items: Iterable of ``(content_id, FC)``.
:param [str] feature_names:
A list of feature names to retrieve. When ``None``, all
features are retrieved. Wildcards are allowed.
'''
actions = []
for cid, fc in items:
# TODO: If we store features in a columnar order, then we
# could tell ES to index the feature values directly. ---AG
# (But is problematic because we want to preserve the ability
# to selectively index FCs. So we'd probably need two distinct
# doc types.)
idxs = defaultdict(list)
if indexes:
for fname in self.indexed_features:
if fname in fc:
idxs[fname_to_idx_name(fname)].extend(fc[fname])
for fname in self.fulltext_indexed_features:
if fname not in fc:
continue
if isinstance(fc[fname], basestring):
idxs[fname_to_full_idx_name(fname)] = fc[fname]
else:
idxs[fname_to_full_idx_name(fname)].extend(fc[fname])
actions.append({
'_index': self.index,
'_type': self.type,
'_id': eid(cid),
'_op_type': 'index',
'_source': dict(idxs, **{
'fc': self.fc_to_dict(fc),
}),
})
bulk(self.conn, actions, timeout=60, request_timeout=60) |
def _build_connection_args(self, klass):
"""Read connection args spec, exclude self from list of possible
:param klass: Redis connection class.
"""
bases = [base for base in klass.__bases__ if base is not object]
all_args = []
for cls in [klass] + bases:
try:
args = inspect.getfullargspec(cls.__init__).args
except AttributeError:
args = inspect.getargspec(cls.__init__).args
for arg in args:
if arg in all_args:
continue
all_args.append(arg)
all_args.remove('self')
return all_args | Read connection args spec, exclude self from list of possible
:param klass: Redis connection class. | Below is the instruction that describes the task:
### Input:
Read connection args spec, exclude self from list of possible
:param klass: Redis connection class.
### Response:
def _build_connection_args(self, klass):
"""Read connection args spec, exclude self from list of possible
:param klass: Redis connection class.
"""
bases = [base for base in klass.__bases__ if base is not object]
all_args = []
for cls in [klass] + bases:
try:
args = inspect.getfullargspec(cls.__init__).args
except AttributeError:
args = inspect.getargspec(cls.__init__).args
for arg in args:
if arg in all_args:
continue
all_args.append(arg)
all_args.remove('self')
return all_args |
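A standalone sketch of the same introspection on a made-up class hierarchy (the real code targets a Redis connection class): `__init__` argument names are collected from the class and its direct bases, duplicates skipped, and `self` removed.

```python
import inspect

class Base:
    def __init__(self, host, port):
        pass

class Child(Base):
    def __init__(self, host, port, db, password=None):
        pass

def build_connection_args(klass):
    bases = [b for b in klass.__bases__ if b is not object]
    all_args = []
    for cls in [klass] + bases:
        for arg in inspect.getfullargspec(cls.__init__).args:
            if arg not in all_args:
                all_args.append(arg)
    all_args.remove("self")
    return all_args

print(build_connection_args(Child))  # ['host', 'port', 'db', 'password']
```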
def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD):
"""
Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param permanent: Permanently hold the temperature. If set to False, the schedule will
resume at the next programmed event
"""
if temperature < self.min_temperature:
temperature = self.min_temperature
if temperature > self.max_temperature:
temperature = self.max_temperature
modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]
if mode not in modes:
raise Exception("Invalid mode. Please use one of: {}".format(modes))
self.set_data({
"SetPointTemp": temperature,
"ScheduleMode": mode
}) | Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param permanent: Permanently hold the temperature. If set to False, the schedule will
resume at the next programmed event | Below is the instruction that describes the task:
### Input:
Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param permanent: Permanently hold the temperature. If set to False, the schedule will
resume at the next programmed event
### Response:
def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD):
"""
Updates the target temperature on the NuHeat API
:param temperature: The desired temperature in NuHeat format
:param permanent: Permanently hold the temperature. If set to False, the schedule will
resume at the next programmed event
"""
if temperature < self.min_temperature:
temperature = self.min_temperature
if temperature > self.max_temperature:
temperature = self.max_temperature
modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]
if mode not in modes:
raise Exception("Invalid mode. Please use one of: {}".format(modes))
self.set_data({
"SetPointTemp": temperature,
"ScheduleMode": mode
}) |
def surface_evaluate_cartesian_multi(surface, points):
"""Image for :meth`.Surface.evaluate_cartesian_multi` docstring."""
if NO_IMAGES:
return
ax = surface.plot(256)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0],
r"$w_0$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 1] + 2 * delta,
points[1, 1],
r"$w_1$",
fontsize=font_size,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
points[0, 2],
points[1, 2] + delta,
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.375)
ax.set_ylim(-0.25, 2.125)
save_image(ax.figure, "surface_evaluate_cartesian_multi.png") | Image for :meth`.Surface.evaluate_cartesian_multi` docstring. | Below is the instruction that describes the task:
### Input:
Image for :meth`.Surface.evaluate_cartesian_multi` docstring.
### Response:
def surface_evaluate_cartesian_multi(surface, points):
"""Image for :meth`.Surface.evaluate_cartesian_multi` docstring."""
if NO_IMAGES:
return
ax = surface.plot(256)
ax.plot(
points[0, :], points[1, :], color="black", linestyle="None", marker="o"
)
delta = 1.0 / 32.0
font_size = 18
ax.text(
points[0, 0],
points[1, 0],
r"$w_0$",
fontsize=font_size,
verticalalignment="top",
horizontalalignment="right",
)
ax.text(
points[0, 1] + 2 * delta,
points[1, 1],
r"$w_1$",
fontsize=font_size,
verticalalignment="center",
horizontalalignment="left",
)
ax.text(
points[0, 2],
points[1, 2] + delta,
r"$w_2$",
fontsize=font_size,
verticalalignment="bottom",
horizontalalignment="left",
)
ax.axis("scaled")
ax.set_xlim(-3.125, 2.375)
ax.set_ylim(-0.25, 2.125)
save_image(ax.figure, "surface_evaluate_cartesian_multi.png") |
def _row_should_be_placed(self, row, position):
""":return: whether to place this instruction"""
placed_row = self._rows_in_grid.get(row)
return placed_row is None or placed_row.y < position.y | :return: whether to place this instruction | Below is the instruction that describes the task:
### Input:
:return: whether to place this instruction
### Response:
def _row_should_be_placed(self, row, position):
""":return: whether to place this instruction"""
placed_row = self._rows_in_grid.get(row)
return placed_row is None or placed_row.y < position.y |
def correlation(P, obs1, obs2=None, times=[1], k=None):
r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
M = P.shape[0]
T = np.asarray(times).max()
if T < M:
return correlation_matvec(P, obs1, obs2=obs2, times=times)
else:
return correlation_decomp(P, obs1, obs2=obs2, times=times, k=k) | r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times | Below is the instruction that describes the task:
### Input:
r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
### Response:
def correlation(P, obs1, obs2=None, times=[1], k=None):
r"""Time-correlation for equilibrium experiment.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvectors and eigenvalues to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
"""
M = P.shape[0]
T = np.asarray(times).max()
if T < M:
return correlation_matvec(P, obs1, obs2=obs2, times=times)
else:
return correlation_decomp(P, obs1, obs2=obs2, times=times, k=k) |
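A NumPy sketch of the quantity being computed, the equilibrium time-correlation E[obs1(x_0) obs2(x_t)] = sum_i mu_i obs1_i (P^t obs2)_i, for a small two-state chain. It mirrors the matrix-vector path rather than the eigendecomposition path, and the transition matrix here is made up:

```python
import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
obs1 = np.array([1.0, 0.0])
obs2 = obs1

# Stationary distribution mu: left eigenvector of P for eigenvalue 1.
evals, evecs = np.linalg.eig(P.T)
mu = np.real(evecs[:, np.argmax(np.real(evals))])
mu /= mu.sum()

for t in [1, 5, 50]:
    corr = (mu * obs1) @ np.linalg.matrix_power(P, t) @ obs2
    print(t, corr)   # decays from ~2/3 toward (mu @ obs1) * (mu @ obs2) ~ 4/9
```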
def query(self):
"""
Builds the query for this quick filter.
:return <orb.Query>
"""
output = Query()
for column, op, plugin, editor in self._plugins:
query = Query(column)
if plugin.setupQuery(query, op, editor):
output &= query
return output | Builds the query for this quick filter.
:return <orb.Query> | Below is the instruction that describes the task:
### Input:
Builds the query for this quick filter.
:return <orb.Query>
### Response:
def query(self):
"""
Builds the query for this quick filter.
:return <orb.Query>
"""
output = Query()
for column, op, plugin, editor in self._plugins:
query = Query(column)
if plugin.setupQuery(query, op, editor):
output &= query
return output |
def discover():
"""
Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw
"""
if CFG["plugins"]["autoload"]:
report_plugins = CFG["plugins"]["reports"].value
for plugin in report_plugins:
try:
importlib.import_module(plugin)
LOG.debug("Found report: %s", plugin)
except ImportError:
LOG.error("Could not find '%s'", plugin) | Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw | Below is the instruction that describes the task:
### Input:
Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw
### Response:
def discover():
"""
Import all experiments listed in *_PLUGINS_REPORTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.reports import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> l.setLevel(lg.DEBUG)
>>> l.handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["reports"] = ["benchbuild.non.existing", "benchbuild.reports.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
Found report: benchbuild.reports.raw
"""
if CFG["plugins"]["autoload"]:
report_plugins = CFG["plugins"]["reports"].value
for plugin in report_plugins:
try:
importlib.import_module(plugin)
LOG.debug("Found report: %s", plugin)
except ImportError:
LOG.error("Could not find '%s'", plugin) |
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command) | For a given IP address remove SSH keys from the known_hosts file | Below is the instruction that describes the task:
### Input:
For a given IP address remove SSH keys from the known_hosts file
### Response:
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command) |
def haslayer(self, cls):
"""Specific: NTPHeader().haslayer(NTP) should return True."""
if cls == "NTP":
if isinstance(self, NTP):
return True
elif issubtype(cls, NTP):
if isinstance(self, cls):
return True
return super(NTP, self).haslayer(cls) | Specific: NTPHeader().haslayer(NTP) should return True. | Below is the instruction that describes the task:
### Input:
Specific: NTPHeader().haslayer(NTP) should return True.
### Response:
def haslayer(self, cls):
"""Specific: NTPHeader().haslayer(NTP) should return True."""
if cls == "NTP":
if isinstance(self, NTP):
return True
elif issubtype(cls, NTP):
if isinstance(self, cls):
return True
return super(NTP, self).haslayer(cls) |
def request_permission(cls, permissions):
""" Requests permission and returns an future result that returns a
boolean indicating if all the given permission were granted or denied.
"""
app = AndroidApplication.instance()
f = app.create_future()
def on_result(perms):
allowed = True
for p in permissions:
allowed = allowed and perms.get(p, False)
f.set_result(allowed)
app.request_permissions(permissions).then(on_result)
return f | Requests permission and returns an future result that returns a
boolean indicating if all the given permission were granted or denied. | Below is the instruction that describes the task:
### Input:
Requests permission and returns an future result that returns a
boolean indicating if all the given permission were granted or denied.
### Response:
def request_permission(cls, permissions):
""" Requests permission and returns an future result that returns a
boolean indicating if all the given permission were granted or denied.
"""
app = AndroidApplication.instance()
f = app.create_future()
def on_result(perms):
allowed = True
for p in permissions:
allowed = allowed and perms.get(p, False)
f.set_result(allowed)
app.request_permissions(permissions).then(on_result)
return f |
def find(self, name):
"""
Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represent the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the serach result.
"""
if name.__class__ is 'base.Server.Pro' or name.__class__ is 'base.Server.Smart':
# print('DEBUG: matched VM object %s' % name.__class__)
pattern = name.vm_name
else:
# print('DEBUG: matched Str Object %s' % name.__class__)
pattern = name
# 14/06/2013: since this method is called within a thread and I wont to pass the return objects with queue or
# call back, I will allocate a list inside the Interface class object itself, which contain all of the vm found
# 02/11/2015: this must be changed ASAP! it's a mess this way... what was I thinking??
self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
return self.last_search_result | Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represent the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the serach result. | Below is the instruction that describes the task:
### Input:
Return a list of subset of VM that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represent the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the serach result.
### Response:
def find(self, name):
"""
Return a list (subset) of VMs that match the pattern name
@param name (str): the vm name of the virtual machine
@param name (Obj): the vm object that represents the virtual
machine (can be Pro or Smart)
@return (list): the subset containing the search result.
"""
if name.__class__ is 'base.Server.Pro' or name.__class__ is 'base.Server.Smart':
# print('DEBUG: matched VM object %s' % name.__class__)
pattern = name.vm_name
else:
# print('DEBUG: matched Str Object %s' % name.__class__)
pattern = name
# 14/06/2013: since this method is called within a thread and I don't want to pass the return objects with a queue or
# callback, I will allocate a list inside the Interface class object itself, which contains all of the VMs found
# 02/11/2015: this must be changed ASAP! it's a mess this way... what was I thinking??
self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
return self.last_search_result |
def body_lines(self):
""" Return a normalized list of lines from message's body. """
if not self.message.is_multipart():
body = self.message.get_payload(None, decode=True)
else:
_, _, body = self.message.as_string().partition("\n\n")
if isinstance(body, bytes):
for enc in ['ascii', 'utf-8']:
try:
body = body.decode(enc)
break
except UnicodeDecodeError:
continue
else:
body = self.message.get_payload(None, decode=False)
return body.splitlines(True) | Return a normalized list of lines from message's body. | Below is the instruction that describes the task:
### Input:
Return a normalized list of lines from message's body.
### Response:
def body_lines(self):
""" Return a normalized list of lines from message's body. """
if not self.message.is_multipart():
body = self.message.get_payload(None, decode=True)
else:
_, _, body = self.message.as_string().partition("\n\n")
if isinstance(body, bytes):
for enc in ['ascii', 'utf-8']:
try:
body = body.decode(enc)
break
except UnicodeDecodeError:
continue
else:
body = self.message.get_payload(None, decode=False)
return body.splitlines(True) |
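
A small self-contained illustration of the non-multipart branch above, using the standard email module; the sample message is invented for illustration:

import email
msg = email.message_from_string("Subject: hi\n\nline one\nline two\n")
print(msg.is_multipart())                        # False
payload = msg.get_payload(None, decode=True)     # raw bytes of the body
print(payload.decode("ascii").splitlines(True))  # ['line one\n', 'line two\n']
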
def setup_http_session():
"""Sets the global HTTP settings, such as proxy and headers."""
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
if args.http_cookies:
streamlink.set_option("http-cookies", args.http_cookies)
if args.http_headers:
streamlink.set_option("http-headers", args.http_headers)
if args.http_query_params:
streamlink.set_option("http-query-params", args.http_query_params) | Sets the global HTTP settings, such as proxy and headers. | Below is the instruction that describes the task:
### Input:
Sets the global HTTP settings, such as proxy and headers.
### Response:
def setup_http_session():
"""Sets the global HTTP settings, such as proxy and headers."""
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
if args.http_cookies:
streamlink.set_option("http-cookies", args.http_cookies)
if args.http_headers:
streamlink.set_option("http-headers", args.http_headers)
if args.http_query_params:
streamlink.set_option("http-query-params", args.http_query_params) |
def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
"""Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration
"""
smtp_config = config['SMTP']
# Receivers must be an array.
if not isinstance(receivers, list) and not isinstance(receivers, tuple):
receivers = [receivers]
# Create the messages
msgs = []
if text is not None:
msgs.append(MIMEText(text, 'plain', charset))
if html is not None:
msgs.append(MIMEText(html, 'html', charset))
if len(msgs) == 0:
raise Exception("No message is given.")
if len(msgs) == 1:
msg = msgs[0]
else:
msg = MIMEMultipart()
for m in msgs:
msg.attach(m)
# Default sender.
if sender is None:
sender = smtp_config['SENDER']
# Fill the info.
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ", ".join(receivers)
# Send.
smtp_server = smtplib.SMTP(**(smtp_config['SERVER']))
smtp_server.sendmail(sender, receivers, msg.as_string())
smtp_server.quit() | Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration | Below is the instruction that describes the task:
### Input:
Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration
### Response:
def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
"""Sends an email.
:param sender: Sender as string or None for default got from config.
:param receivers: String or array of recipients.
:param subject: Subject.
:param text: Plain text message.
:param html: Html message.
:param charset: Charset.
:param config: Current configuration
"""
smtp_config = config['SMTP']
# Receivers must be an array.
if not isinstance(receivers, list) and not isinstance(receivers, tuple):
receivers = [receivers]
# Create the messages
msgs = []
if text is not None:
msgs.append(MIMEText(text, 'plain', charset))
if html is not None:
msgs.append(MIMEText(html, 'html', charset))
if len(msgs) == 0:
raise Exception("No message is given.")
if len(msgs) == 1:
msg = msgs[0]
else:
msg = MIMEMultipart()
for m in msgs:
msg.attach(m)
# Default sender.
if sender is None:
sender = smtp_config['SENDER']
# Fill the info.
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ", ".join(receivers)
# Send.
smtp_server = smtplib.SMTP(**(smtp_config['SERVER']))
smtp_server.sendmail(sender, receivers, msg.as_string())
smtp_server.quit() |
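
A hypothetical call to send_email as defined above; the config layout mirrors the keys the function reads (config['SMTP']['SENDER'] and config['SMTP']['SERVER']), while the addresses and server settings are made up and assume an SMTP server is reachable there:

config = {
    "SMTP": {
        "SENDER": "noreply@example.com",
        # unpacked straight into smtplib.SMTP(**...), so any of its keyword arguments fit
        "SERVER": {"host": "localhost", "port": 25},
    }
}
send_email(None, "alice@example.com", "Hello",
           text="plain body", html="<b>html body</b>", config=config)
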
def fromISO8601TimeAndDate(klass, iso8601string, tzinfo=None):
"""Return a new Time instance from a string formatted as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work.
"""
def calculateTimezone():
if groups['zulu'] == 'Z':
return FixedOffset(0, 0)
else:
tzhour = groups.pop('tzhour')
tzmin = groups.pop('tzmin')
if tzhour is not None:
return FixedOffset(int(tzhour), int(tzmin or 0))
return tzinfo or FixedOffset(0, 0)
def coerceGroups():
groups['month'] = groups['month1'] or groups['month2']
groups['week'] = groups['week1'] or groups['week2']
# don't include fractional seconds, because it's not an integer.
defaultTo0 = ['hour', 'minute', 'second']
defaultTo1 = ['month', 'day', 'week', 'weekday', 'dayofyear']
if groups['fractionalsec'] is None:
groups['fractionalsec'] = '0'
for key in defaultTo0:
if groups[key] is None:
groups[key] = 0
for key in defaultTo1:
if groups[key] is None:
groups[key] = 1
groups['fractionalsec'] = float('.'+groups['fractionalsec'])
for key in defaultTo0 + defaultTo1 + ['year']:
groups[key] = int(groups[key])
for group, min, max in [
# some years have only 52 weeks
('week', 1, 53),
('weekday', 1, 7),
('month', 1, 12),
('day', 1, 31),
('hour', 0, 24),
('minute', 0, 59),
# Sometime in the 22nd century AD, two leap seconds will be
# required every year. In the 25th century AD, four every
# year. We'll ignore that for now though because it would be
# tricky to get right and we certainly don't need it for our
# target applications. In other words, post-singularity
# Martian users, please do not rely on this code for
# compatibility with Greater Galactic Protectorate of Earth
# date/time formatting! Apologies, but no library I know of in
# Python is sufficient for processing their dates and times
# without ADA bindings to get the radiation-safety zone counter
# correct. -glyph
('second', 0, 61),
# don't forget leap years
('dayofyear', 1, 366)]:
if not min <= groups[group] <= max:
raise ValueError, '%s must be in %i..%i' % (group, min, max)
def determineResolution():
if match.group('fractionalsec') is not None:
return max(datetime.timedelta.resolution,
datetime.timedelta(
microseconds=1 * 10 ** -len(
match.group('fractionalsec')) * 1000000))
for testGroup, resolution in [
('second', datetime.timedelta(seconds=1)),
('minute', datetime.timedelta(minutes=1)),
('hour', datetime.timedelta(hours=1)),
('weekday', datetime.timedelta(days=1)),
('dayofyear', datetime.timedelta(days=1)),
('day', datetime.timedelta(days=1)),
('week1', datetime.timedelta(weeks=1)),
('week2', datetime.timedelta(weeks=1))]:
if match.group(testGroup) is not None:
return resolution
if match.group('month1') is not None \
or match.group('month2') is not None:
if self._time.month == 12:
return datetime.timedelta(days=31)
nextMonth = self._time.replace(month=self._time.month+1)
return nextMonth - self._time
else:
nextYear = self._time.replace(year=self._time.year+1)
return nextYear - self._time
def calculateDtime(tzinfo):
"""Calculate a datetime for the start of the addressed period."""
if match.group('week1') is not None \
or match.group('week2') is not None:
if not 0 < groups['week'] <= 53:
raise ValueError(
'week must be in 1..53 (was %i)' % (groups['week'],))
dtime = datetime.datetime(
groups['year'],
1,
4,
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
dtime -= datetime.timedelta(days = dtime.weekday())
dtime += datetime.timedelta(
days = (groups['week']-1) * 7 + groups['weekday'] - 1)
if dtime.isocalendar() != (
groups['year'], groups['week'], groups['weekday']):
# actually the problem could be an error in my logic, but
# nothing should cause this but requesting week 53 of a
# year with 52 weeks.
raise ValueError('year %04i has no week %02i' %
(groups['year'], groups['week']))
return dtime
if match.group('dayofyear') is not None:
dtime = datetime.datetime(
groups['year'],
1,
1,
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
dtime += datetime.timedelta(days=groups['dayofyear']-1)
if dtime.year != groups['year']:
raise ValueError(
'year %04i has no day of year %03i' %
(groups['year'], groups['dayofyear']))
return dtime
else:
return datetime.datetime(
groups['year'],
groups['month'],
groups['day'],
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
match = klass.iso8601pattern.match(iso8601string)
if match is None:
raise ValueError(
'%r could not be parsed as an ISO 8601 date and time' %
(iso8601string,))
groups = match.groupdict()
coerceGroups()
if match.group('hour') is not None:
timezone = calculateTimezone()
else:
timezone = None
self = klass.fromDatetime(calculateDtime(timezone))
self.resolution = determineResolution()
return self | Return a new Time instance from a string formatted as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work. | Below is the instruction that describes the task:
### Input:
Return a new Time instance from a string formatted as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work.
### Response:
def fromISO8601TimeAndDate(klass, iso8601string, tzinfo=None):
"""Return a new Time instance from a string formatted as in ISO 8601.
If the given string contains no timezone, it is assumed to be in the
timezone specified by the parameter `tzinfo`, or UTC if tzinfo is None.
An input string with an explicit timezone will always override tzinfo.
If the given iso8601string does not contain all parts of the time, they
will default to 0 in the timezone given by `tzinfo`.
WARNING: this function is incomplete. ISO is dumb and their standards
are not free. Only a subset of all valid ISO 8601 dates are parsed,
because I can't find a formal description of the format. However,
common ones should work.
"""
def calculateTimezone():
if groups['zulu'] == 'Z':
return FixedOffset(0, 0)
else:
tzhour = groups.pop('tzhour')
tzmin = groups.pop('tzmin')
if tzhour is not None:
return FixedOffset(int(tzhour), int(tzmin or 0))
return tzinfo or FixedOffset(0, 0)
def coerceGroups():
groups['month'] = groups['month1'] or groups['month2']
groups['week'] = groups['week1'] or groups['week2']
# don't include fractional seconds, because it's not an integer.
defaultTo0 = ['hour', 'minute', 'second']
defaultTo1 = ['month', 'day', 'week', 'weekday', 'dayofyear']
if groups['fractionalsec'] is None:
groups['fractionalsec'] = '0'
for key in defaultTo0:
if groups[key] is None:
groups[key] = 0
for key in defaultTo1:
if groups[key] is None:
groups[key] = 1
groups['fractionalsec'] = float('.'+groups['fractionalsec'])
for key in defaultTo0 + defaultTo1 + ['year']:
groups[key] = int(groups[key])
for group, min, max in [
# some years have only 52 weeks
('week', 1, 53),
('weekday', 1, 7),
('month', 1, 12),
('day', 1, 31),
('hour', 0, 24),
('minute', 0, 59),
# Sometime in the 22nd century AD, two leap seconds will be
# required every year. In the 25th century AD, four every
# year. We'll ignore that for now though because it would be
# tricky to get right and we certainly don't need it for our
# target applications. In other words, post-singularity
# Martian users, please do not rely on this code for
# compatibility with Greater Galactic Protectorate of Earth
# date/time formatting! Apologies, but no library I know of in
# Python is sufficient for processing their dates and times
# without ADA bindings to get the radiation-safety zone counter
# correct. -glyph
('second', 0, 61),
# don't forget leap years
('dayofyear', 1, 366)]:
if not min <= groups[group] <= max:
raise ValueError, '%s must be in %i..%i' % (group, min, max)
def determineResolution():
if match.group('fractionalsec') is not None:
return max(datetime.timedelta.resolution,
datetime.timedelta(
microseconds=1 * 10 ** -len(
match.group('fractionalsec')) * 1000000))
for testGroup, resolution in [
('second', datetime.timedelta(seconds=1)),
('minute', datetime.timedelta(minutes=1)),
('hour', datetime.timedelta(hours=1)),
('weekday', datetime.timedelta(days=1)),
('dayofyear', datetime.timedelta(days=1)),
('day', datetime.timedelta(days=1)),
('week1', datetime.timedelta(weeks=1)),
('week2', datetime.timedelta(weeks=1))]:
if match.group(testGroup) is not None:
return resolution
if match.group('month1') is not None \
or match.group('month2') is not None:
if self._time.month == 12:
return datetime.timedelta(days=31)
nextMonth = self._time.replace(month=self._time.month+1)
return nextMonth - self._time
else:
nextYear = self._time.replace(year=self._time.year+1)
return nextYear - self._time
def calculateDtime(tzinfo):
"""Calculate a datetime for the start of the addressed period."""
if match.group('week1') is not None \
or match.group('week2') is not None:
if not 0 < groups['week'] <= 53:
raise ValueError(
'week must be in 1..53 (was %i)' % (groups['week'],))
dtime = datetime.datetime(
groups['year'],
1,
4,
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
dtime -= datetime.timedelta(days = dtime.weekday())
dtime += datetime.timedelta(
days = (groups['week']-1) * 7 + groups['weekday'] - 1)
if dtime.isocalendar() != (
groups['year'], groups['week'], groups['weekday']):
# actually the problem could be an error in my logic, but
# nothing should cause this but requesting week 53 of a
# year with 52 weeks.
raise ValueError('year %04i has no week %02i' %
(groups['year'], groups['week']))
return dtime
if match.group('dayofyear') is not None:
dtime = datetime.datetime(
groups['year'],
1,
1,
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
dtime += datetime.timedelta(days=groups['dayofyear']-1)
if dtime.year != groups['year']:
raise ValueError(
'year %04i has no day of year %03i' %
(groups['year'], groups['dayofyear']))
return dtime
else:
return datetime.datetime(
groups['year'],
groups['month'],
groups['day'],
groups['hour'],
groups['minute'],
groups['second'],
int(round(groups['fractionalsec'] * 1000000)),
tzinfo=tzinfo
)
match = klass.iso8601pattern.match(iso8601string)
if match is None:
raise ValueError(
'%r could not be parsed as an ISO 8601 date and time' %
(iso8601string,))
groups = match.groupdict()
coerceGroups()
if match.group('hour') is not None:
timezone = calculateTimezone()
else:
timezone = None
self = klass.fromDatetime(calculateDtime(timezone))
self.resolution = determineResolution()
return self |
def _read_tags_for_revset(self, spec):
"""
Return TaggedRevision for each tag/rev combination in the revset spec
"""
cmd = [
'log', '--style', 'default', '--config', 'defaults.log=',
'-r', spec]
res = self._invoke(*cmd)
header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
match_res = map(header_pattern.match, res.splitlines())
matched_lines = filter(None, match_res)
matches = (match.groupdict() for match in matched_lines)
for match in matches:
if match['header'] == 'changeset':
id, sep, rev = match['value'].partition(':')
if match['header'] == 'tag':
tag = match['value']
yield TaggedRevision(tag, rev) | Return TaggedRevision for each tag/rev combination in the revset spec | Below is the instruction that describes the task:
### Input:
Return TaggedRevision for each tag/rev combination in the revset spec
### Response:
def _read_tags_for_revset(self, spec):
"""
Return TaggedRevision for each tag/rev combination in the revset spec
"""
cmd = [
'log', '--style', 'default', '--config', 'defaults.log=',
'-r', spec]
res = self._invoke(*cmd)
header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
match_res = map(header_pattern.match, res.splitlines())
matched_lines = filter(None, match_res)
matches = (match.groupdict() for match in matched_lines)
for match in matches:
if match['header'] == 'changeset':
id, sep, rev = match['value'].partition(':')
if match['header'] == 'tag':
tag = match['value']
yield TaggedRevision(tag, rev) |
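
A standalone sketch of the header-line parsing used above, run on a fabricated log excerpt (the excerpt is illustrative, not real command output):

import re
sample = "changeset:   3:a1b2c3d4e5f6\ntag:         v1.0\nuser:        someone\n"
header_pattern = re.compile(r'(?P<header>\w+?):\s+(?P<value>.*)')
for line in sample.splitlines():
    m = header_pattern.match(line)
    if m:
        print(m.groupdict())
# {'header': 'changeset', 'value': '3:a1b2c3d4e5f6'}
# {'header': 'tag', 'value': 'v1.0'}
# {'header': 'user', 'value': 'someone'}
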
def _plaintext_data_key():
'''
Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS.
'''
response = getattr(_plaintext_data_key, 'response', None)
cache_hit = response is not None
if not cache_hit:
response = _api_decrypt()
setattr(_plaintext_data_key, 'response', response)
key_id = response['KeyId']
plaintext = response['Plaintext']
if hasattr(plaintext, 'encode'):
plaintext = plaintext.encode(__salt_system_encoding__)
log.debug('Using key %s from %s', key_id, 'cache' if cache_hit else 'api call')
return plaintext | Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS. | Below is the instruction that describes the task:
### Input:
Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS.
### Response:
def _plaintext_data_key():
'''
Return the configured KMS data key decrypted and encoded in urlsafe base64.
Cache the result to minimize API calls to AWS.
'''
response = getattr(_plaintext_data_key, 'response', None)
cache_hit = response is not None
if not cache_hit:
response = _api_decrypt()
setattr(_plaintext_data_key, 'response', response)
key_id = response['KeyId']
plaintext = response['Plaintext']
if hasattr(plaintext, 'encode'):
plaintext = plaintext.encode(__salt_system_encoding__)
log.debug('Using key %s from %s', key_id, 'cache' if cache_hit else 'api call')
return plaintext |
def apply_cut(self, cut):
"""Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem.
"""
return Subsystem(self.network, self.state, self.node_indices,
cut=cut, mice_cache=self._mice_cache) | Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem. | Below is the instruction that describes the task:
### Input:
Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem.
### Response:
def apply_cut(self, cut):
"""Return a cut version of this |Subsystem|.
Args:
cut (Cut): The cut to apply to this |Subsystem|.
Returns:
Subsystem: The cut subsystem.
"""
return Subsystem(self.network, self.state, self.node_indices,
cut=cut, mice_cache=self._mice_cache) |
def new_event(event):
"""
Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance.
"""
op_name = event.EventType.DESCRIPTOR.values_by_number[event.type].name
if op_name == 'PUT':
cls = PutEvent
elif op_name == 'DELETE':
cls = DeleteEvent
else:
raise Exception('Invalid op_name')
return cls(event) | Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance. | Below is the instruction that describes the task:
### Input:
Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance.
### Response:
def new_event(event):
"""
Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance.
"""
op_name = event.EventType.DESCRIPTOR.values_by_number[event.type].name
if op_name == 'PUT':
cls = PutEvent
elif op_name == 'DELETE':
cls = DeleteEvent
else:
raise Exception('Invalid op_name')
return cls(event) |
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1) | Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each). | Below is the instruction that describes the task:
### Input:
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
### Response:
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1) |
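
A minimal standalone illustration of the bit-packing idea above: each level's code is shifted into its own bit range, so OR-ing the shifted codes yields one order-preserving integer key per row (two 16-bit levels are assumed here just for the example):

import numpy as np
offsets = np.array([16, 0], dtype=np.uint64)                 # shift per level
codes = np.array([[1, 2], [1, 3], [2, 0]], dtype=np.uint64)  # one row per key
keys = np.bitwise_or.reduce(codes << offsets, axis=1)
print(keys)   # [ 65538  65539 131072] -- lexicographic row order is preserved
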
def copy_meta_from(self, ido):
"""Copies vtki meta data onto this object from another object"""
self._active_scalar_info = ido.active_scalar_info
self._active_vectors_info = ido.active_vectors_info
if hasattr(ido, '_textures'):
self._textures = ido._textures | Copies vtki meta data onto this object from another object | Below is the instruction that describes the task:
### Input:
Copies vtki meta data onto this object from another object
### Response:
def copy_meta_from(self, ido):
"""Copies vtki meta data onto this object from another object"""
self._active_scalar_info = ido.active_scalar_info
self._active_vectors_info = ido.active_vectors_info
if hasattr(ido, '_textures'):
self._textures = ido._textures |
def GetResourceIdOrFullNameFromLink(resource_link):
"""Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str
"""
# For named based, the resource link is the full name
if IsNameBased(resource_link):
return TrimBeginningAndEndingSlashes(resource_link)
# Padding the resource link with leading and trailing slashes if not already
if resource_link[-1] != '/':
resource_link = resource_link + '/'
if resource_link[0] != '/':
resource_link = '/' + resource_link
# The path will be in the form of
# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/ or
# /[resourceType]/[resourceId]/ .... /[resourceType]/
# The result of split will be in the form of
# ["", [resourceType], [resourceId] ... ,[resourceType], [resourceId], ""]
# In the first case, to extract the resourceId it will be the element
# before last ( at length -2 ) and the type will be before it
# ( at length -3 )
# In the second case, to extract the resource type it will be the element
# before last ( at length -2 )
path_parts = resource_link.split("/")
if len(path_parts) % 2 == 0:
# request in form
# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/.
return str(path_parts[-2])
return None | Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str | Below is the instruction that describes the task:
### Input:
Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str
### Response:
def GetResourceIdOrFullNameFromLink(resource_link):
"""Gets resource id or full name from resource link.
:param str resource_link:
:return:
The resource id or full name from the resource link.
:rtype: str
"""
# For named based, the resource link is the full name
if IsNameBased(resource_link):
return TrimBeginningAndEndingSlashes(resource_link)
# Padding the resource link with leading and trailing slashes if not already
if resource_link[-1] != '/':
resource_link = resource_link + '/'
if resource_link[0] != '/':
resource_link = '/' + resource_link
# The path will be in the form of
# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/ or
# /[resourceType]/[resourceId]/ .... /[resourceType]/
# The result of split will be in the form of
# ["", [resourceType], [resourceId] ... ,[resourceType], [resourceId], ""]
# In the first case, to extract the resourceId it will be the element
# before last ( at length -2 ) and the type will be before it
# ( at length -3 )
# In the second case, to extract the resource type it will be the element
# before last ( at length -2 )
path_parts = resource_link.split("/")
if len(path_parts) % 2 == 0:
# request in form
# /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/.
return str(path_parts[-2])
return None |
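
A standalone look at the path-splitting rule described in the comments above, shown without calling the function itself since its helper routines are defined elsewhere; the links are invented examples:

for link in ("/dbs/db1/colls/coll1/", "/dbs/db1/colls/"):
    parts = link.split("/")
    # an even number of parts means the link addresses a specific resource,
    # so its id is the second-to-last element; otherwise there is no id
    print(link, "->", parts[-2] if len(parts) % 2 == 0 else None)
# /dbs/db1/colls/coll1/ -> coll1
# /dbs/db1/colls/ -> None
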
def set_end(self,time,pass_to_command_line=True):
"""
Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@bool pass_to_command_line: add gps-end-time as variable option.
"""
if pass_to_command_line:
self.add_var_opt('gps-end-time',time)
self.__end = time
self.__data_end = time | Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@bool pass_to_command_line: add gps-end-time as variable option. | Below is the instruction that describes the task:
### Input:
Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@bool pass_to_command_line: add gps-end-time as variable option.
### Response:
def set_end(self,time,pass_to_command_line=True):
"""
Set the GPS end time of the analysis node by setting a --gps-end-time
option to the node when it is executed.
@param time: GPS end time of job.
@bool pass_to_command_line: add gps-end-time as variable option.
"""
if pass_to_command_line:
self.add_var_opt('gps-end-time',time)
self.__end = time
self.__data_end = time |
def create_input(option, template_name, template_location="template"):
'''create an input file using jinja2 by filling a template
with the values from the option variable passed in.'''
# restructure option list into jinja2 input format
jinja2_input = {}
for item in option:
try:
jinja2_input.update(item)
except ValueError:
raise RuntimeError(
("inputs.py, create_input : format of item '{0}' is not "
"supported. Expecting a dictionary.".format(str(item))))
# load the template and fill it with the option variable contents
import jinja2
try:
template_loader = jinja2.FileSystemLoader(searchpath=template_location)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_name)
output_text = template.render(jinja2_input)
except jinja2.TemplateNotFound:
raise RuntimeError("template '{0}' not found".format(template_name))
# return the particular input file as a string
return output_text | create an input file using jinja2 by filling a template
with the values from the option variable passed in. | Below is the instruction that describes the task:
### Input:
create an input file using jinja2 by filling a template
with the values from the option variable passed in.
### Response:
def create_input(option, template_name, template_location="template"):
'''create an input file using jinja2 by filling a template
with the values from the option variable passed in.'''
# restructure option list into jinja2 input format
jinja2_input = {}
for item in option:
try:
jinja2_input.update(item)
except ValueError:
raise RuntimeError(
("inputs.py, create_input : format of item '{0}' is not "
"supported. Expecting a dictionary.".format(str(item))))
# load the template and fill it with the option variable contents
import jinja2
try:
template_loader = jinja2.FileSystemLoader(searchpath=template_location)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_name)
output_text = template.render(jinja2_input)
except jinja2.TemplateNotFound:
raise RuntimeError("template '{0}' not found".format(template_name))
# return the particular input file as a string
return output_text |
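
A hypothetical usage sketch for create_input as defined above; the template file, its contents, and the option values are invented purely for illustration (jinja2 must be installed):

import os, tempfile
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "greeting.txt"), "w") as fh:
    fh.write("Hello {{ name }}, count is {{ count }}.")
text = create_input([{"name": "world"}, {"count": 3}],
                    "greeting.txt", template_location=tmpdir)
print(text)   # Hello world, count is 3.
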
def _cutout_expnum(observation, sky_coord, radius):
"""
Get a cutout from an exposure based on the RA/DEC location.
@param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList)
"""
uri = observation.get_image_uri()
cutout_filehandle = tempfile.NamedTemporaryFile()
disposition_filename = client.copy(uri + "({},{},{})".format(sky_coord.ra.to('degree').value,
sky_coord.dec.to('degree').value,
radius.to('degree').value),
cutout_filehandle.name,
disposition=True)
cutouts = decompose_content_decomposition(disposition_filename)
cutout_filehandle.seek(0)
hdulist = fits.open(cutout_filehandle)
hdulist.verify('silentfix+ignore')
logger.debug("Initial Length of HDUList: {}".format(len(hdulist)))
# Make sure there is a primaryHDU
if len(hdulist) == 1:
phdu = fits.PrimaryHDU()
phdu.header['ORIGIN'] = "OSSOS"
hdulist.insert(0, phdu)
logger.debug("Final Length of HDUList: {}".format(len(hdulist)))
if len(cutouts) != len(hdulist) - 1:
raise ValueError("Wrong number of cutout structures found in Content-Disposition response.")
for hdu in hdulist[1:]:
cutout = cutouts.pop(0)
if 'ASTLEVEL' not in hdu.header:
print("WARNING: ******* NO ASTLEVEL KEYWORD ********** for {0} ********".format(observation.get_image_uri))
hdu.header['ASTLEVEL'] = 0
hdu.header['EXTNO'] = cutout[0]
naxis1 = hdu.header['NAXIS1']
naxis2 = hdu.header['NAXIS2']
default_datasec = "[{}:{},{}:{}]".format(1, naxis1, 1, naxis2)
datasec = hdu.header.get('DATASEC', default_datasec)
datasec = datasec_to_list(datasec)
corners = datasec
for idx in range(len(corners)):
try:
corners[idx] = int(cutout[idx+1])
except Exception:
pass
hdu.header['DATASEC'] = reset_datasec("[{}:{},{}:{}]".format(corners[0],
corners[1],
corners[2],
corners[3]),
hdu.header.get('DATASEC', default_datasec),
hdu.header['NAXIS1'],
hdu.header['NAXIS2'])
hdu.header['XOFFSET'] = int(corners[0]) - 1
hdu.header['YOFFSET'] = int(corners[2]) - 1
hdu.converter = CoordinateConverter(hdu.header['XOFFSET'], hdu.header['YOFFSET'])
try:
hdu.wcs = WCS(hdu.header)
except Exception as ex:
logger.error("Failed trying to initialize the WCS for {}".format(uri))
raise ex
logger.debug("Sending back {}".format(hdulist))
return hdulist | Get a cutout from an exposure based on the RA/DEC location.
@param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList) | Below is the instruction that describes the task:
### Input:
Get a cutout from an exposure based on the RA/DEC location.
@param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList)
### Response:
def _cutout_expnum(observation, sky_coord, radius):
"""
Get a cutout from an exposure based on the RA/DEC location.
@param observation: The Observation object that contains the exposure number information.
@type observation: Observation
@param sky_coord: which RA/DEC is needed,
@type sky_coord: SkyCoord
@param radius:
@type radius: Quantity
@return: HDUList containing the cutout image.
@rtype: list(HDUList)
"""
uri = observation.get_image_uri()
cutout_filehandle = tempfile.NamedTemporaryFile()
disposition_filename = client.copy(uri + "({},{},{})".format(sky_coord.ra.to('degree').value,
sky_coord.dec.to('degree').value,
radius.to('degree').value),
cutout_filehandle.name,
disposition=True)
cutouts = decompose_content_decomposition(disposition_filename)
cutout_filehandle.seek(0)
hdulist = fits.open(cutout_filehandle)
hdulist.verify('silentfix+ignore')
logger.debug("Initial Length of HDUList: {}".format(len(hdulist)))
# Make sure there is a primaryHDU
if len(hdulist) == 1:
phdu = fits.PrimaryHDU()
phdu.header['ORIGIN'] = "OSSOS"
hdulist.insert(0, phdu)
logger.debug("Final Length of HDUList: {}".format(len(hdulist)))
if len(cutouts) != len(hdulist) - 1:
raise ValueError("Wrong number of cutout structures found in Content-Disposition response.")
for hdu in hdulist[1:]:
cutout = cutouts.pop(0)
if 'ASTLEVEL' not in hdu.header:
print("WARNING: ******* NO ASTLEVEL KEYWORD ********** for {0} ********".format(observation.get_image_uri))
hdu.header['ASTLEVEL'] = 0
hdu.header['EXTNO'] = cutout[0]
naxis1 = hdu.header['NAXIS1']
naxis2 = hdu.header['NAXIS2']
default_datasec = "[{}:{},{}:{}]".format(1, naxis1, 1, naxis2)
datasec = hdu.header.get('DATASEC', default_datasec)
datasec = datasec_to_list(datasec)
corners = datasec
for idx in range(len(corners)):
try:
corners[idx] = int(cutout[idx+1])
except Exception:
pass
hdu.header['DATASEC'] = reset_datasec("[{}:{},{}:{}]".format(corners[0],
corners[1],
corners[2],
corners[3]),
hdu.header.get('DATASEC', default_datasec),
hdu.header['NAXIS1'],
hdu.header['NAXIS2'])
hdu.header['XOFFSET'] = int(corners[0]) - 1
hdu.header['YOFFSET'] = int(corners[2]) - 1
hdu.converter = CoordinateConverter(hdu.header['XOFFSET'], hdu.header['YOFFSET'])
try:
hdu.wcs = WCS(hdu.header)
except Exception as ex:
logger.error("Failed trying to initialize the WCS for {}".format(uri))
raise ex
logger.debug("Sending back {}".format(hdulist))
return hdulist |
def __get_keys(self, name='master', passphrase=None):
'''
Returns a key object for a key in the pki-dir
'''
path = os.path.join(self.opts['pki_dir'],
name + '.pem')
if not os.path.exists(path):
log.info('Generating %s keys: %s', name, self.opts['pki_dir'])
gen_keys(self.opts['pki_dir'],
name,
self.opts['keysize'],
self.opts.get('user'),
passphrase)
if HAS_M2:
key_error = RSA.RSAError
else:
key_error = ValueError
try:
key = get_rsa_key(path, passphrase)
except key_error as e:
message = 'Unable to read key: {0}; passphrase may be incorrect'.format(path)
log.error(message)
raise MasterExit(message)
log.debug('Loaded %s key: %s', name, path)
return key | Returns a key object for a key in the pki-dir | Below is the instruction that describes the task:
### Input:
Returns a key object for a key in the pki-dir
### Response:
def __get_keys(self, name='master', passphrase=None):
'''
Returns a key object for a key in the pki-dir
'''
path = os.path.join(self.opts['pki_dir'],
name + '.pem')
if not os.path.exists(path):
log.info('Generating %s keys: %s', name, self.opts['pki_dir'])
gen_keys(self.opts['pki_dir'],
name,
self.opts['keysize'],
self.opts.get('user'),
passphrase)
if HAS_M2:
key_error = RSA.RSAError
else:
key_error = ValueError
try:
key = get_rsa_key(path, passphrase)
except key_error as e:
message = 'Unable to read key: {0}; passphrase may be incorrect'.format(path)
log.error(message)
raise MasterExit(message)
log.debug('Loaded %s key: %s', name, path)
return key |
def almost_hermitian(gate: Gate) -> bool:
"""Return true if gate tensor is (almost) Hermitian"""
return np.allclose(asarray(gate.asoperator()),
asarray(gate.H.asoperator())) | Return true if gate tensor is (almost) Hermitian | Below is the instruction that describes the task:
### Input:
Return true if gate tensor is (almost) Hermitian
### Response:
def almost_hermitian(gate: Gate) -> bool:
"""Return true if gate tensor is (almost) Hermitian"""
return np.allclose(asarray(gate.asoperator()),
asarray(gate.H.asoperator())) |
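
The comparison above amounts to checking that the operator matrix equals its own conjugate transpose; a minimal NumPy illustration of the same test, independent of the Gate class:

import numpy as np
op = np.array([[1, 1j], [-1j, 2]])     # a Hermitian matrix
print(np.allclose(op, op.conj().T))    # True
bad = np.array([[0, 1], [0, 0]])       # not Hermitian
print(np.allclose(bad, bad.conj().T))  # False
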
def cupy_wrapper(func):
"""A wrapper function that converts numpy ndarray arguments to cupy
arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
args = list(args)
for n, a in enumerate(args):
if isinstance(a, np.ndarray):
args[n] = cp.asarray(a)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = cp.asarray(v)
rtn = func(*args, **kwargs)
if isinstance(rtn, (list, tuple)):
for n, a in enumerate(rtn):
if isinstance(a, cp.core.core.ndarray):
rtn[n] = cp.asnumpy(a)
else:
if isinstance(rtn, cp.core.core.ndarray):
rtn = cp.asnumpy(rtn)
return rtn
return wrapped | A wrapper function that converts numpy ndarray arguments to cupy
arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays. | Below is the instruction that describes the task:
### Input:
A wrapper function that converts numpy ndarray arguments to cupy
arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays.
### Response:
def cupy_wrapper(func):
"""A wrapper function that converts numpy ndarray arguments to cupy
arrays, and converts any cupy arrays returned by the wrapped
function into numpy ndarrays.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
args = list(args)
for n, a in enumerate(args):
if isinstance(a, np.ndarray):
args[n] = cp.asarray(a)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = cp.asarray(v)
rtn = func(*args, **kwargs)
if isinstance(rtn, (list, tuple)):
for n, a in enumerate(rtn):
if isinstance(a, cp.core.core.ndarray):
rtn[n] = cp.asnumpy(a)
else:
if isinstance(rtn, cp.core.core.ndarray):
rtn = cp.asnumpy(rtn)
return rtn
return wrapped |
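
A hedged usage sketch for the decorator above, assuming CuPy and a CUDA device are available and that the installed CuPy version still exposes cp.core.core.ndarray as the wrapper expects; the wrapped function is invented for illustration:

import numpy as np

@cupy_wrapper
def axpy(a, x, y):
    return a * x + y   # runs on the GPU; the array inputs arrive as cupy arrays

out = axpy(2.0, np.arange(3), np.ones(3))
print(type(out), out)  # <class 'numpy.ndarray'> [1. 3. 5.]
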
def setall(self, key, values):
"""Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)]
"""
key = self._conform_key(key)
values = [self._conform_value(x) for x in values]
ids = self._key_ids[key][:]
while ids and values:
id = ids.pop(0)
value = values.pop(0)
self._pairs[id] = (key, value)
if ids:
self._key_ids[key] = self._key_ids[key][:-len(ids)]
self._remove_pairs(ids)
for value in values:
self._key_ids[key].append(len(self._pairs))
self._pairs.append((key, value)) | Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)] | Below is the instruction that describes the task:
### Input:
Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)]
### Response:
def setall(self, key, values):
"""Set more than one value for a given key.
Replaces all the existing values for the given key with new values,
removes extra values that are already set if we don't supply enough,
and appends values to the end if there are not enough existing spots.
>>> m = MutableMultiMap(a=1, b=2, c=3)
>>> m.sort()
>>> m.keys()
['a', 'b', 'c']
>>> m.append(('b', 4))
>>> m.setall('b', [5, 6, 7])
>>> m.allitems()
[('a', 1), ('b', 5), ('c', 3), ('b', 6), ('b', 7)]
"""
key = self._conform_key(key)
values = [self._conform_value(x) for x in values]
ids = self._key_ids[key][:]
while ids and values:
id = ids.pop(0)
value = values.pop(0)
self._pairs[id] = (key, value)
if ids:
self._key_ids[key] = self._key_ids[key][:-len(ids)]
self._remove_pairs(ids)
for value in values:
self._key_ids[key].append(len(self._pairs))
self._pairs.append((key, value)) |
def _next(self):
"""Get the next summary and present it."""
self.summaries.rotate(-1)
current_summary = self.summaries[0]
self._update_summary(current_summary) | Get the next summary and present it. | Below is the instruction that describes the task:
### Input:
Get the next summary and present it.
### Response:
def _next(self):
"""Get the next summary and present it."""
self.summaries.rotate(-1)
current_summary = self.summaries[0]
self._update_summary(current_summary) |
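
The cycling above relies on collections.deque; a minimal standalone look at the same rotation trick, with illustrative items:

from collections import deque
summaries = deque(["a", "b", "c"])
summaries.rotate(-1)
print(summaries[0])   # 'b' -- the next summary is now at the front
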
def addContentLen(self, content, len):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContentLen(self._o, content, len) | Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. | Below is the instruction that describes the task:
### Input:
Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported.
### Response:
def addContentLen(self, content, len):
"""Append the extra substring to the node content. NOTE: In
contrast to xmlNodeSetContentLen(), @content is supposed to
be raw text, so unescaped XML special chars are allowed,
entity references are not supported. """
libxml2mod.xmlNodeAddContentLen(self._o, content, len) |
def _cli():
'''CLI interface'''
parser = _argparse.ArgumentParser(
description='''
Present the user with a simple CLI menu, and return the option chosen.
The menu is presented via stderr.
The output is printed to stdout for piping.
'''.format(_VERSION),
epilog='''
The default for the post prompt is "Enter an option to continue: ".
If --default-index is specified, the default option value will be printed
in the post prompt as well.
'''
)
parser.add_argument(
'option',
help='The option(s) to present to the user.',
nargs='*'
)
parser.add_argument(
'--version', '-v',
help='Print the version and then exit',
action='store_true'
)
parser.add_argument(
'--pre', '-p',
help='The pre-prompt/title/introduction to the menu. [%(default)s]',
default='Options:',
metavar='TEXT'
)
parser.add_argument(
'--post', '-P',
help='The prompt presented to the user after the menu items.',
default=_NO_ARG,
metavar='TEXT'
)
parser.add_argument(
'--default-index', '-d',
help='The index of the item to use as the default',
type=int,
metavar='INT'
)
parser.add_argument(
'--indexed', '-i',
help='Print indices with the options, and allow the user to use them to choose.',
action='store_true'
)
parser.add_argument(
'--insensitive', '-I',
help=(
'Perform insensitive matching. Also drops any items that case-insensitively match'
+ ' prior items.'
),
action='store_true'
)
parser.add_argument(
'--fuzzy', '-f',
help='search for the individual words in the user input anywhere in the item strings.',
action='store_true'
)
parser.add_argument(
'--stdout',
help='Use stdout for interactive output (instead of the default: stderr).',
action='store_true'
)
# parse options
args = parser.parse_args()
# argparse nargs is awkward. Translate to be a proper plural.
options = args.option
# set the stream
stream = _sys.stdout if args.stdout else _sys.stderr
# if version, print version and exit
if args.version:
stream.write('Pimento - v{}\n'.format(_VERSION))
exit(0)
# read more options from stdin if there are any
# but only if we're on a 'nix system with tty's
tty = '/dev/tty'
if not _sys.stdin.isatty() and _path.exists(tty):
if _sys.version_info.major == 3:
stream.write('[!] python3 input bug - tab completion not available\n')
stream.write('[!] python3 input bug - arrow support not available\n')
stream.write('[!] only known workaround is to not pipe in.\n')
options += [l.rstrip() for l in _sys.stdin]
# switch to the main tty
# this solution (to being interactive after reading from pipe)
# comes from: https://stackoverflow.com/questions/6312819/pipes-and-prompts-in-python-cli-scripts
_sys.stdin = open(tty)
# show the menu
try:
result = menu(
options,
pre_prompt=args.pre,
post_prompt=args.post,
default_index=args.default_index,
indexed=args.indexed,
insensitive=args.insensitive,
fuzzy=args.fuzzy,
stream=stream
)
# print the result (to stdout)
_sys.stdout.write(result + '\n')
except KeyboardInterrupt:
_sys.stderr.write("\nCTRL-C detected. Exiting.\n")
_sys.stderr.flush()
except Exception as e:
_sys.stdout.write("ERROR: {}\n".format(e))
exit(1) | CLI interface | Below is the instruction that describes the task:
### Input:
CLI interface
### Response:
def _cli():
'''CLI interface'''
parser = _argparse.ArgumentParser(
description='''
Present the user with a simple CLI menu, and return the option chosen.
The menu is presented via stderr.
The output is printed to stdout for piping.
'''.format(_VERSION),
epilog='''
The default for the post prompt is "Enter an option to continue: ".
If --default-index is specified, the default option value will be printed
in the post prompt as well.
'''
)
parser.add_argument(
'option',
help='The option(s) to present to the user.',
nargs='*'
)
parser.add_argument(
'--version', '-v',
help='Print the version and then exit',
action='store_true'
)
parser.add_argument(
'--pre', '-p',
help='The pre-prompt/title/introduction to the menu. [%(default)s]',
default='Options:',
metavar='TEXT'
)
parser.add_argument(
'--post', '-P',
help='The prompt presented to the user after the menu items.',
default=_NO_ARG,
metavar='TEXT'
)
parser.add_argument(
'--default-index', '-d',
help='The index of the item to use as the default',
type=int,
metavar='INT'
)
parser.add_argument(
'--indexed', '-i',
help='Print indices with the options, and allow the user to use them to choose.',
action='store_true'
)
parser.add_argument(
'--insensitive', '-I',
help=(
'Perform insensitive matching. Also drops any items that case-insensitively match'
+ ' prior items.'
),
action='store_true'
)
parser.add_argument(
'--fuzzy', '-f',
help='search for the individual words in the user input anywhere in the item strings.',
action='store_true'
)
parser.add_argument(
'--stdout',
help='Use stdout for interactive output (instead of the default: stderr).',
action='store_true'
)
# parse options
args = parser.parse_args()
# argparse nargs is awkward. Translate to be a proper plural.
options = args.option
# set the stream
stream = _sys.stdout if args.stdout else _sys.stderr
# if version, print version and exit
if args.version:
stream.write('Pimento - v{}\n'.format(_VERSION))
exit(0)
# read more options from stdin if there are any
# but only if we're on a 'nix system with tty's
tty = '/dev/tty'
if not _sys.stdin.isatty() and _path.exists(tty):
if _sys.version_info.major == 3:
stream.write('[!] python3 input bug - tab completion not available\n')
stream.write('[!] python3 input bug - arrow support not available\n')
stream.write('[!] only known workaround is to not pipe in.\n')
options += [l.rstrip() for l in _sys.stdin]
# switch to the main tty
# this solution (to being interactive after reading from pipe)
# comes from: https://stackoverflow.com/questions/6312819/pipes-and-prompts-in-python-cli-scripts
_sys.stdin = open(tty)
# show the menu
try:
result = menu(
options,
pre_prompt=args.pre,
post_prompt=args.post,
default_index=args.default_index,
indexed=args.indexed,
insensitive=args.insensitive,
fuzzy=args.fuzzy,
stream=stream
)
# print the result (to stdout)
_sys.stdout.write(result + '\n')
except KeyboardInterrupt:
_sys.stderr.write("\nCTRL-C detected. Exiting.\n")
_sys.stderr.flush()
except Exception as e:
_sys.stdout.write("ERROR: {}\n".format(e))
exit(1) |
def _select(self, event):
"""This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
this datacursor and fire a pick event if the mouse is over a managed
artist."""
def event_axes_data(event, ax):
"""Creates a new event will have xdata and ydata based on *ax*."""
# We need to redefine event.xdata and event.ydata for twinned axes
# to work correctly
point = event.x, event.y
x, y = ax.transData.inverted().transform_point(point)
event = copy.copy(event)
event.xdata, event.ydata = x, y
return event
def contains(artist, event):
"""Need to ensure we don't trigger a pick event for axes in a
different figure. Otherwise, picking on one figure will trigger a
datacursor in another figure."""
if event.canvas is artist.figure.canvas:
return artist.contains(event)
else:
return False, {}
# If we're on top of an annotation box, hide it if right-clicked or
# do nothing if we're in draggable mode
for anno in list(self.annotations.values()):
fixed_event = event_axes_data(event, anno.axes)
if contains(anno, fixed_event)[0]:
if event.button == self.hide_button:
self._hide_box(anno)
elif self.draggable:
return
for artist in self.artists:
fixed_event = event_axes_data(event, artist.axes)
inside, info = contains(artist, fixed_event)
if inside:
fig = artist.figure
new_event = PickEvent('pick_event', fig.canvas, fixed_event,
artist, **info)
self(new_event)
# Only fire a single pick event for one mouseevent. Otherwise
# we'll need timers, etc to avoid multiple calls
break
# Not hovering over anything...
if self.hover:
artists = itertools.chain(self.artists, self.annotations.values())
over_something = [contains(artist, event)[0] for artist in artists]
if not any(over_something):
self.hide() | This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
    this datacursor and fire a pick event if the mouse is over a managed
    artist. | Below is the instruction that describes the task:
### Input:
This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
this datacursor and fire a pick event if the mouse is over a managed
artist.
### Response:
def _select(self, event):
"""This is basically a proxy to trigger a pick event. This function is
connected to either a mouse motion or mouse button event (see
"self.enable") depending on "self.hover". If we're over a point, it
fires a pick event.
This probably seems bizarre, but it's required for hover mode (no mouse
click) and otherwise it's a workaround for picking artists in twinned
or overlapping axes.
Even if we're not in hover mode, pick events won't work properly for
twinned axes. Therefore, we manually go through all artists managed by
    this datacursor and fire a pick event if the mouse is over a managed
artist."""
def event_axes_data(event, ax):
"""Creates a new event will have xdata and ydata based on *ax*."""
# We need to redefine event.xdata and event.ydata for twinned axes
# to work correctly
point = event.x, event.y
x, y = ax.transData.inverted().transform_point(point)
event = copy.copy(event)
event.xdata, event.ydata = x, y
return event
def contains(artist, event):
"""Need to ensure we don't trigger a pick event for axes in a
different figure. Otherwise, picking on one figure will trigger a
datacursor in another figure."""
if event.canvas is artist.figure.canvas:
return artist.contains(event)
else:
return False, {}
# If we're on top of an annotation box, hide it if right-clicked or
# do nothing if we're in draggable mode
for anno in list(self.annotations.values()):
fixed_event = event_axes_data(event, anno.axes)
if contains(anno, fixed_event)[0]:
if event.button == self.hide_button:
self._hide_box(anno)
elif self.draggable:
return
for artist in self.artists:
fixed_event = event_axes_data(event, artist.axes)
inside, info = contains(artist, fixed_event)
if inside:
fig = artist.figure
new_event = PickEvent('pick_event', fig.canvas, fixed_event,
artist, **info)
self(new_event)
# Only fire a single pick event for one mouseevent. Otherwise
# we'll need timers, etc to avoid multiple calls
break
# Not hovering over anything...
if self.hover:
artists = itertools.chain(self.artists, self.annotations.values())
over_something = [contains(artist, event)[0] for artist in artists]
if not any(over_something):
self.hide() |
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) | This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
        parent: Unused | Below is the instruction that describes the task:
### Input:
This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
### Response:
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) |
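As an illustration of the chaining behaviour documented above, here is a self-contained sketch; toy_variable_matrix is a made-up stand-in for the real variable_matrix helper, and the temperature/pressure values are arbitrary.

from itertools import chain

def toy_variable_matrix(item, parent=None, iterator="product"):
    # Hypothetical stand-in for variable_matrix(): expand {"temp": [1, 2]}
    # into [{"temp": 1}, {"temp": 2}].
    key, values = next(iter(item.items()))
    return [{key: value} for value in values]

def toy_iterator_chain(variables):
    # Mirrors the chain/append behaviour: every element's expansion is
    # concatenated into one flat list and yielded as a single value set.
    if not isinstance(variables, list):
        raise ValueError("chain expects a list of variable specifications")
    yield list(chain.from_iterable(toy_variable_matrix(item) for item in variables))

print(next(toy_iterator_chain([{"temperature": [0.5, 1.0]}, {"pressure": [1.0]}])))
# [{'temperature': 0.5}, {'temperature': 1.0}, {'pressure': 1.0}]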
def run_scalpel(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run Scalpel indel calling, either paired tumor/normal or germline calling.
"""
if region is None:
message = ("A region must be provided for Scalpel")
raise ValueError(message)
if is_paired_analysis(align_bams, items):
call_file = _run_scalpel_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
call_file = _run_scalpel_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
    return call_file | Run Scalpel indel calling, either paired tumor/normal or germline calling. | Below is the instruction that describes the task:
### Input:
Run Scalpel indel calling, either paired tumor/normal or germline calling.
### Response:
def run_scalpel(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run Scalpel indel calling, either paired tumor/normal or germline calling.
"""
if region is None:
message = ("A region must be provided for Scalpel")
raise ValueError(message)
if is_paired_analysis(align_bams, items):
call_file = _run_scalpel_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
call_file = _run_scalpel_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file |
def call_fset(self, obj, value) -> None:
"""Store the given custom value and call the setter function."""
        vars(obj)[self.name] = self.fset(obj, value) | Store the given custom value and call the setter function. | Below is the instruction that describes the task:
### Input:
Store the given custom value and call the setter function.
### Response:
def call_fset(self, obj, value) -> None:
"""Store the given custom value and call the setter function."""
vars(obj)[self.name] = self.fset(obj, value) |
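For context, a minimal self-contained sketch of the descriptor pattern this setter supports, assuming a descriptor that exposes name and fset the same way; the Setter class and the celsius example are illustrative, not part of the original library.

class Setter:
    # Minimal descriptor: the setter's return value is cached in the
    # instance __dict__ under the descriptor's name, as in call_fset above.
    def __init__(self, fset):
        self.fset = fset
        self.name = fset.__name__

    def __set__(self, obj, value):
        vars(obj)[self.name] = self.fset(obj, value)

    def __get__(self, obj, objtype=None):
        return vars(obj)[self.name]

class Sample:
    @Setter
    def celsius(self, value):
        return float(value)  # normalise whatever was assigned

s = Sample()
s.celsius = "21.5"
print(s.celsius)  # 21.5 (stored as a float by the setter)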
def delete(self, cascade=False):
"""
Deletes this playlist.
"""
if self.id:
self.connection.post('delete_playlist', playlist_id=self.id,
cascade=cascade)
        self.id = None | Deletes this playlist. | Below is the instruction that describes the task:
### Input:
Deletes this playlist.
### Response:
def delete(self, cascade=False):
"""
Deletes this playlist.
"""
if self.id:
self.connection.post('delete_playlist', playlist_id=self.id,
cascade=cascade)
self.id = None |
def mount_volume(volume, device='/dev/xvdf', mountpoint='/mnt/data', fstype='ext4'):
'''
Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4
'''
_ec2().attach_volume(volume, _host_node()['id'], device)
time.sleep(1)
sudo('mkdir -p "%s"' % mountpoint)
sudo('mount -t "%s" "%s" "%s"' % (fstype, device, mountpoint)) | Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
    fstype (str): default ext4 | Below is the instruction that describes the task:
### Input:
Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4
### Response:
def mount_volume(volume, device='/dev/xvdf', mountpoint='/mnt/data', fstype='ext4'):
'''
Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4
'''
_ec2().attach_volume(volume, _host_node()['id'], device)
time.sleep(1)
sudo('mkdir -p "%s"' % mountpoint)
sudo('mount -t "%s" "%s" "%s"' % (fstype, device, mountpoint)) |
def get_or_create_by_title(self, title):
"""
Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
brand new.
"""
try:
obj = self.get_by_title(title)
created = False
except DoesNotExistError:
obj = self.create(title=title)
created = True
return obj, created | Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
    brand new. | Below is the instruction that describes the task:
### Input:
Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
brand new.
### Response:
def get_or_create_by_title(self, title):
"""
Fetch a title, if it exists. Create it if it doesn't.
Returns a tuple with the object first, and then a boolean that
indicates whether or not the object was created fresh. True means it's
brand new.
"""
try:
obj = self.get_by_title(title)
created = False
except DoesNotExistError:
obj = self.create(title=title)
created = True
return obj, created |
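A hedged, self-contained sketch of the same get-or-create contract against an in-memory store; DoesNotExistError and the dict-backed manager below are stand-ins for whatever backend the real manager talks to.

class DoesNotExistError(Exception):
    pass

class TitleManager:
    def __init__(self):
        self._store = {}

    def get_by_title(self, title):
        try:
            return self._store[title]
        except KeyError:
            raise DoesNotExistError(title)

    def create(self, title):
        obj = {"title": title}
        self._store[title] = obj
        return obj

    def get_or_create_by_title(self, title):
        # Same contract: (object, created), where created is True on a fresh insert.
        try:
            return self.get_by_title(title), False
        except DoesNotExistError:
            return self.create(title), True

mgr = TitleManager()
print(mgr.get_or_create_by_title("Dune"))  # ({'title': 'Dune'}, True)
print(mgr.get_or_create_by_title("Dune"))  # ({'title': 'Dune'}, False)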
def find_hist2d_offset(filename, reference, refwcs=None, refnames=['ra', 'dec'],
match_tolerance=5., chip_catalog=True, search_radius=15.0,
min_match=10, classify=True):
"""Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip.
"""
# Interpret input image to generate initial source catalog and WCS
if isinstance(filename, str):
image = pf.open(filename)
rootname = filename.split("_")[0]
else:
image = filename
rootname = image[0].header['rootname']
# check to see whether reference catalog can be found
if not os.path.exists(reference):
log.info("Could not find input reference catalog: {}".format(reference))
raise FileNotFoundError
# Extract reference WCS from image
if refwcs is None:
refwcs = build_self_reference(image, clean_wcs=True)
log.info("Computing offset for field-of-view defined by:")
log.info(refwcs)
# read in reference catalog
if isinstance(reference, str):
refcat = ascii.read(reference)
else:
refcat = reference
log.info("\nRead in reference catalog with {} sources.".format(len(refcat)))
ref_ra = refcat[refnames[0]]
ref_dec = refcat[refnames[1]]
# Build source catalog for entire image
img_cat = generate_source_catalog(image, refwcs, output=chip_catalog, classify=classify)
img_cat.write(filename.replace(".fits", "_xy.cat"), format='ascii.no_header',
overwrite=True)
# Retrieve source XY positions in reference frame
seg_xy = np.column_stack((img_cat['xcentroid'], img_cat['ycentroid']))
seg_xy = seg_xy[~np.isnan(seg_xy[:, 0])]
# Translate reference catalog positions into input image coordinate frame
xref, yref = refwcs.all_world2pix(ref_ra, ref_dec, 1)
# look for only sources within the viewable area of the exposure to
# determine the offset
xref, yref = within_footprint(image, refwcs, xref, yref)
ref_xy = np.column_stack((xref, yref))
log.info("\nWorking with {} astrometric sources for this field".format(len(ref_xy)))
# write out astrometric reference catalog that was actually used
ref_ra_img, ref_dec_img = refwcs.all_pix2world(xref, yref, 1)
ref_tab = Table([ref_ra_img, ref_dec_img, xref, yref], names=['ra', 'dec', 'x', 'y'])
ref_tab.write(reference.replace('.cat', '_{}.cat'.format(rootname)),
format='ascii.fast_commented_header', overwrite=True)
searchrad = search_radius / refwcs.pscale
# Use 2d-Histogram builder from drizzlepac.tweakreg -- for demo only...
xp, yp, nmatches, zpqual = build_xy_zeropoint(seg_xy, ref_xy,
searchrad=searchrad,
histplot=False, figure_id=1,
plotname=None, interactive=False)
hist2d_offset = (xp, yp)
log.info('best offset {} based on {} cross-matches'.format(hist2d_offset, nmatches))
return hist2d_offset, seg_xy, ref_xy | Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
    field-of-view, not just a single chip. | Below is the instruction that describes the task:
### Input:
Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip.
### Response:
def find_hist2d_offset(filename, reference, refwcs=None, refnames=['ra', 'dec'],
match_tolerance=5., chip_catalog=True, search_radius=15.0,
min_match=10, classify=True):
"""Iteratively look for the best cross-match between the catalog and ref.
Parameters
----------
filename : `~astropy.io.fits.HDUList` or str
Single image to extract sources for matching to
the external astrometric catalog.
reference : str or `~astropy.table.Table`
Reference catalog, either as a filename or ``astropy.Table``
containing astrometrically accurate sky coordinates for astrometric
standard sources.
refwcs : `~stwcs.wcsutil.HSTWCS`
This WCS will define the coordinate frame which will
be used to determine the offset. If None is specified, use the
WCS from the input image `filename` to build this WCS using
`build_self_reference()`.
refnames : list
List of table column names for sky coordinates of astrometric
standard sources from reference catalog.
match_tolerance : float
Tolerance (in pixels) for recognizing that a source position matches
an astrometric catalog position. Larger values allow for lower
accuracy source positions to be compared to astrometric catalog
chip_catalog : bool
Specify whether or not to write out individual source catalog for
each chip in the image.
search_radius : float
Maximum separation (in arcseconds) from source positions to look
for valid cross-matches with reference source positions.
min_match : int
Minimum number of cross-matches for an acceptable determination of
the offset.
classify : bool
Specify whether or not to use central_moments classification to
ignore likely cosmic-rays/bad-pixels when generating the source
catalog.
Returns
-------
best_offset : tuple
Offset in input image pixels between image source positions and
astrometric catalog positions that results in largest number of
matches of astrometric sources with image sources
seg_xy, ref_xy : astropy.Table
Source catalog and reference catalog, respectively, used for
determining the offset. Each catalog includes sources for the entire
field-of-view, not just a single chip.
"""
# Interpret input image to generate initial source catalog and WCS
if isinstance(filename, str):
image = pf.open(filename)
rootname = filename.split("_")[0]
else:
image = filename
rootname = image[0].header['rootname']
# check to see whether reference catalog can be found
if not os.path.exists(reference):
log.info("Could not find input reference catalog: {}".format(reference))
raise FileNotFoundError
# Extract reference WCS from image
if refwcs is None:
refwcs = build_self_reference(image, clean_wcs=True)
log.info("Computing offset for field-of-view defined by:")
log.info(refwcs)
# read in reference catalog
if isinstance(reference, str):
refcat = ascii.read(reference)
else:
refcat = reference
log.info("\nRead in reference catalog with {} sources.".format(len(refcat)))
ref_ra = refcat[refnames[0]]
ref_dec = refcat[refnames[1]]
# Build source catalog for entire image
img_cat = generate_source_catalog(image, refwcs, output=chip_catalog, classify=classify)
img_cat.write(filename.replace(".fits", "_xy.cat"), format='ascii.no_header',
overwrite=True)
# Retrieve source XY positions in reference frame
seg_xy = np.column_stack((img_cat['xcentroid'], img_cat['ycentroid']))
seg_xy = seg_xy[~np.isnan(seg_xy[:, 0])]
# Translate reference catalog positions into input image coordinate frame
xref, yref = refwcs.all_world2pix(ref_ra, ref_dec, 1)
# look for only sources within the viewable area of the exposure to
# determine the offset
xref, yref = within_footprint(image, refwcs, xref, yref)
ref_xy = np.column_stack((xref, yref))
log.info("\nWorking with {} astrometric sources for this field".format(len(ref_xy)))
# write out astrometric reference catalog that was actually used
ref_ra_img, ref_dec_img = refwcs.all_pix2world(xref, yref, 1)
ref_tab = Table([ref_ra_img, ref_dec_img, xref, yref], names=['ra', 'dec', 'x', 'y'])
ref_tab.write(reference.replace('.cat', '_{}.cat'.format(rootname)),
format='ascii.fast_commented_header', overwrite=True)
searchrad = search_radius / refwcs.pscale
# Use 2d-Histogram builder from drizzlepac.tweakreg -- for demo only...
xp, yp, nmatches, zpqual = build_xy_zeropoint(seg_xy, ref_xy,
searchrad=searchrad,
histplot=False, figure_id=1,
plotname=None, interactive=False)
hist2d_offset = (xp, yp)
log.info('best offset {} based on {} cross-matches'.format(hist2d_offset, nmatches))
return hist2d_offset, seg_xy, ref_xy |
def add(self, match, handler):
"""Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
matching calls. If not a Route instance, will be wrapped in one.
"""
self.routes.append((match, (
Route(handler) if not isinstance(handler, Route)
else handler
))) | Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
        matching calls. If not a Route instance, will be wrapped in one. | Below is the instruction that describes the task:
### Input:
Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
matching calls. If not a Route instance, will be wrapped in one.
### Response:
def add(self, match, handler):
"""Register a handler with the Router.
:param match: The first argument passed to the :meth:`match` method
when checking against this handler.
:param handler: A callable or :class:`Route` instance that will handle
matching calls. If not a Route instance, will be wrapped in one.
"""
self.routes.append((match, (
Route(handler) if not isinstance(handler, Route)
else handler
))) |
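To show how such a router is typically exercised, here is a small self-contained sketch; the Route wrapper and the exact-string match rule are assumptions for illustration, not the library's actual classes.

class Route:
    # Hypothetical stand-in: just wraps a callable.
    def __init__(self, handler):
        self.handler = handler

    def __call__(self, *args, **kwargs):
        return self.handler(*args, **kwargs)

class Router:
    def __init__(self):
        self.routes = []

    def add(self, match, handler):
        # Same behaviour as above: non-Route handlers get wrapped.
        self.routes.append((match, handler if isinstance(handler, Route) else Route(handler)))

    def dispatch(self, name, *args, **kwargs):
        # Assumed matching rule: the first route whose match equals the name wins.
        for match, route in self.routes:
            if match == name:
                return route(*args, **kwargs)
        raise LookupError(name)

router = Router()
router.add("ping", lambda: "pong")
print(router.dispatch("ping"))  # pong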
def derived_from_all(self, identities: List[QualName]) -> MutableSet[QualName]:
"""Return list of identities transitively derived from all `identity`."""
if not identities:
return set()
res = self.derived_from(identities[0])
for id in identities[1:]:
res &= self.derived_from(id)
    return res | Return the set of identities transitively derived from all `identities`. | Below is the instruction that describes the task:
### Input:
Return the set of identities transitively derived from all `identities`.
### Response:
def derived_from_all(self, identities: List[QualName]) -> MutableSet[QualName]:
"""Return list of identities transitively derived from all `identity`."""
if not identities:
return set()
res = self.derived_from(identities[0])
for id in identities[1:]:
res &= self.derived_from(id)
return res |
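A runnable sketch of the intersection logic, assuming derived_from returns a plain set per identity; the identity hierarchy below is invented for illustration.

class ToyIdentityIndex:
    def __init__(self, derived):
        # derived maps an identity to the set of identities derived from it.
        self._derived = derived

    def derived_from(self, identity):
        return set(self._derived.get(identity, ()))

    def derived_from_all(self, identities):
        # Intersect the derivation sets of every listed identity.
        if not identities:
            return set()
        res = self.derived_from(identities[0])
        for ident in identities[1:]:
            res &= self.derived_from(ident)
        return res

idx = ToyIdentityIndex({
    ("mod", "ethernet"): {("mod", "fast-ethernet"), ("mod", "gigabit-ethernet")},
    ("mod", "copper"): {("mod", "fast-ethernet")},
})
print(idx.derived_from_all([("mod", "ethernet"), ("mod", "copper")]))
# {('mod', 'fast-ethernet')}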
def grasstruth(args):
"""
%prog grasstruth james-pan-grass.txt
Prepare truth pairs for 4 grasses.
"""
p = OptionParser(grasstruth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
james, = args
fp = open(james)
pairs = set()
for row in fp:
atoms = row.split()
genes = []
idx = {}
for i, a in enumerate(atoms):
aa = a.split("||")
for ma in aa:
idx[ma] = i
genes.extend(aa)
genes = [x for x in genes if ":" not in x]
Os = [x for x in genes if x.startswith("Os")]
for o in Os:
for g in genes:
if idx[o] == idx[g]:
continue
pairs.add(tuple(sorted((o, g))))
for a, b in sorted(pairs):
print("\t".join((a, b))) | %prog grasstruth james-pan-grass.txt
    Prepare truth pairs for 4 grasses. | Below is the instruction that describes the task:
### Input:
%prog grasstruth james-pan-grass.txt
Prepare truth pairs for 4 grasses.
### Response:
def grasstruth(args):
"""
%prog grasstruth james-pan-grass.txt
Prepare truth pairs for 4 grasses.
"""
p = OptionParser(grasstruth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
james, = args
fp = open(james)
pairs = set()
for row in fp:
atoms = row.split()
genes = []
idx = {}
for i, a in enumerate(atoms):
aa = a.split("||")
for ma in aa:
idx[ma] = i
genes.extend(aa)
genes = [x for x in genes if ":" not in x]
Os = [x for x in genes if x.startswith("Os")]
for o in Os:
for g in genes:
if idx[o] == idx[g]:
continue
pairs.add(tuple(sorted((o, g))))
for a, b in sorted(pairs):
print("\t".join((a, b))) |
def content_list(self, key, model):
"""Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model:
"""
path = PROVISION_MANAGE_CONTENT + model + '/'
return self._request(path, key, '', 'GET', self._manage_by_cik) | Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
        model: | Below is the instruction that describes the task:
### Input:
Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model:
### Response:
def content_list(self, key, model):
"""Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model:
"""
path = PROVISION_MANAGE_CONTENT + model + '/'
return self._request(path, key, '', 'GET', self._manage_by_cik) |
def exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
        return _swigibpy.EClient_exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override) | exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override) | Below is the instruction that describes the task:
### Input:
exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)
### Response:
def exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
return _swigibpy.EClient_exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override) |
def execute(self, fragment, pretty_format=True):
"""
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
"""
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None | Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
        None. | Below is the instruction that describes the task:
### Input:
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
### Response:
def execute(self, fragment, pretty_format=True):
"""
Run or aggregate a query fragment
Concat the fragment to any stored fragments. If they form a complete
query, run it and return the result. If not, store them and return
None.
"""
self.fragments = (self.fragments + "\n" + fragment).lstrip()
try:
line_parser.parseString(self.fragments)
except ParseException:
pass
else:
self.last_query = self.fragments.strip()
self.fragments = ""
return super(FragmentEngine, self).execute(self.last_query, pretty_format)
return None |
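A self-contained sketch of the fragment-accumulation idea, using a toy completeness test (a trailing semicolon) in place of line_parser.parseString; the real grammar check is whatever that parser accepts.

class ToyFragmentEngine:
    def __init__(self):
        self.fragments = ""
        self.last_query = ""

    def _run(self, query):
        return "ran: " + query  # stand-in for the parent engine's execute()

    def execute(self, fragment):
        # Accumulate fragments until they form a "complete" statement.
        self.fragments = (self.fragments + "\n" + fragment).lstrip()
        if not self.fragments.rstrip().endswith(";"):
            return None  # still waiting for more input
        self.last_query = self.fragments.strip()
        self.fragments = ""
        return self._run(self.last_query)

engine = ToyFragmentEngine()
print(engine.execute("SELECT *"))     # None -> fragment stored, waiting for more
print(engine.execute("FROM users;"))  # ran: SELECT *\nFROM users;  (fragments joined by a newline)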
def startup_walk_for_missed_files(self):
"""Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
compressed as well as the files we have compressed but not yet uploaded and process them."""
for site in self.config["backup_sites"]:
compressed_xlog_path, _ = self.create_backup_site_paths(site)
uncompressed_xlog_path = compressed_xlog_path + "_incoming"
# Process uncompressed files (ie WAL pg_receivexlog received)
for filename in os.listdir(uncompressed_xlog_path):
full_path = os.path.join(uncompressed_xlog_path, filename)
if not wal.WAL_RE.match(filename) and not wal.TIMELINE_RE.match(filename):
self.log.warning("Found invalid file %r from incoming xlog directory", full_path)
continue
compression_event = {
"delete_file_after_compression": True,
"full_path": full_path,
"site": site,
"src_path": "{}.partial",
"type": "MOVE",
}
self.log.debug("Found: %r when starting up, adding to compression queue", compression_event)
self.compression_queue.put(compression_event)
# Process compressed files (ie things we've processed but not yet uploaded)
for filename in os.listdir(compressed_xlog_path):
if filename.endswith(".metadata"):
continue # silently ignore .metadata files, they're expected and processed below
full_path = os.path.join(compressed_xlog_path, filename)
metadata_path = full_path + ".metadata"
is_xlog = wal.WAL_RE.match(filename)
is_timeline = wal.TIMELINE_RE.match(filename)
if not ((is_xlog or is_timeline) and os.path.exists(metadata_path)):
self.log.warning("Found invalid file %r from compressed xlog directory", full_path)
continue
with open(metadata_path, "r") as fp:
metadata = json.load(fp)
transfer_event = {
"file_size": os.path.getsize(full_path),
"filetype": "xlog" if is_xlog else "timeline",
"local_path": full_path,
"metadata": metadata,
"site": site,
"type": "UPLOAD",
}
self.log.debug("Found: %r when starting up, adding to transfer queue", transfer_event)
self.transfer_queue.put(transfer_event) | Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
    compressed as well as the files we have compressed but not yet uploaded and process them. | Below is the instruction that describes the task:
### Input:
Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
compressed as well as the files we have compressed but not yet uploaded and process them.
### Response:
def startup_walk_for_missed_files(self):
"""Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
compressed as well as the files we have compressed but not yet uploaded and process them."""
for site in self.config["backup_sites"]:
compressed_xlog_path, _ = self.create_backup_site_paths(site)
uncompressed_xlog_path = compressed_xlog_path + "_incoming"
# Process uncompressed files (ie WAL pg_receivexlog received)
for filename in os.listdir(uncompressed_xlog_path):
full_path = os.path.join(uncompressed_xlog_path, filename)
if not wal.WAL_RE.match(filename) and not wal.TIMELINE_RE.match(filename):
self.log.warning("Found invalid file %r from incoming xlog directory", full_path)
continue
compression_event = {
"delete_file_after_compression": True,
"full_path": full_path,
"site": site,
"src_path": "{}.partial",
"type": "MOVE",
}
self.log.debug("Found: %r when starting up, adding to compression queue", compression_event)
self.compression_queue.put(compression_event)
# Process compressed files (ie things we've processed but not yet uploaded)
for filename in os.listdir(compressed_xlog_path):
if filename.endswith(".metadata"):
continue # silently ignore .metadata files, they're expected and processed below
full_path = os.path.join(compressed_xlog_path, filename)
metadata_path = full_path + ".metadata"
is_xlog = wal.WAL_RE.match(filename)
is_timeline = wal.TIMELINE_RE.match(filename)
if not ((is_xlog or is_timeline) and os.path.exists(metadata_path)):
self.log.warning("Found invalid file %r from compressed xlog directory", full_path)
continue
with open(metadata_path, "r") as fp:
metadata = json.load(fp)
transfer_event = {
"file_size": os.path.getsize(full_path),
"filetype": "xlog" if is_xlog else "timeline",
"local_path": full_path,
"metadata": metadata,
"site": site,
"type": "UPLOAD",
}
self.log.debug("Found: %r when starting up, adding to transfer queue", transfer_event)
self.transfer_queue.put(transfer_event) |
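The walk above hinges on classifying file names with wal.WAL_RE and wal.TIMELINE_RE. Below is a small sketch of that classification using approximate patterns; the exact regexes in pghoard may differ.

import re

# Assumed approximations: 24 hex chars for a WAL segment name,
# "<8 hex chars>.history" for a timeline history file.
WAL_RE = re.compile(r"^[A-F0-9]{24}$")
TIMELINE_RE = re.compile(r"^[A-F0-9]{8}\.history$")

for name in ["000000010000000000000004", "00000002.history", "junk.tmp"]:
    kind = "xlog" if WAL_RE.match(name) else "timeline" if TIMELINE_RE.match(name) else "invalid"
    print(name, "->", kind)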
def check_subprocess(cmd, source, outname):
"""Run the command to resize the video and remove the output file if the
processing fails.
"""
logger = logging.getLogger(__name__)
try:
res = subprocess.run(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except KeyboardInterrupt:
logger.debug('Process terminated, removing file %s', outname)
if os.path.isfile(outname):
os.remove(outname)
raise
if res.returncode:
logger.debug('STDOUT:\n %s', res.stdout.decode('utf8'))
logger.debug('STDERR:\n %s', res.stderr.decode('utf8'))
if os.path.isfile(outname):
logger.debug('Removing file %s', outname)
os.remove(outname)
raise SubprocessException('Failed to process ' + source) | Run the command to resize the video and remove the output file if the
    processing fails. | Below is the instruction that describes the task:
### Input:
Run the command to resize the video and remove the output file if the
processing fails.
### Response:
def check_subprocess(cmd, source, outname):
"""Run the command to resize the video and remove the output file if the
processing fails.
"""
logger = logging.getLogger(__name__)
try:
res = subprocess.run(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except KeyboardInterrupt:
logger.debug('Process terminated, removing file %s', outname)
if os.path.isfile(outname):
os.remove(outname)
raise
if res.returncode:
logger.debug('STDOUT:\n %s', res.stdout.decode('utf8'))
logger.debug('STDERR:\n %s', res.stderr.decode('utf8'))
if os.path.isfile(outname):
logger.debug('Removing file %s', outname)
os.remove(outname)
raise SubprocessException('Failed to process ' + source) |
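A hedged usage sketch: it assumes ffmpeg is installed and that clip_in.mp4 exists; the command and file names are illustrative only, and SubprocessException is the exception class used by the function above.

import logging
logging.basicConfig(level=logging.DEBUG)

# Hypothetical call: transcode a clip; on a non-zero exit status the partial
# clip_out.mp4 is removed and SubprocessException is raised.
cmd = ['ffmpeg', '-i', 'clip_in.mp4', '-vf', 'scale=640:-2', '-y', 'clip_out.mp4']
try:
    check_subprocess(cmd, 'clip_in.mp4', outname='clip_out.mp4')
except SubprocessException as exc:
    print(exc)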
def load_labware(
self, labware_obj: Labware,
location: types.DeckLocation) -> Labware:
""" Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str
"""
self._deck_layout[location] = labware_obj
return labware_obj | Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
    :type location: int or str | Below is the instruction that describes the task:
### Input:
Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str
### Response:
def load_labware(
self, labware_obj: Labware,
location: types.DeckLocation) -> Labware:
""" Specify the presence of a piece of labware on the OT2 deck.
This function loads the labware specified by `labware`
(previously loaded from a configuration file) to the location
specified by `location`.
:param Labware labware: The labware object to load
:param location: The slot into which to load the labware such as
1 or '1'
:type location: int or str
"""
self._deck_layout[location] = labware_obj
return labware_obj |
def data(offset, bytes):
"""Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
"""
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b) | Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
    @return String representation of one HEX record | Below is the instruction that describes the task:
### Input:
Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
### Response:
def data(offset, bytes):
"""Return Data record. This constructs the full record, including
the length information, the record type (0x00), the
checksum, and the offset.
@param offset load offset of first byte.
@param bytes list of byte values to pack into record.
@return String representation of one HEX record
"""
assert 0 <= offset < 65536
assert 0 < len(bytes) < 256
b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes
return Record._from_bytes(b) |
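For reference, a self-contained sketch that assembles the same kind of Intel HEX data record by hand, including the two's-complement checksum the docstring mentions; this is an independent illustration, not the Record._from_bytes implementation.

def hex_data_record(offset, data):
    # Intel HEX data record layout: :LLOOOOTT<data bytes>CC
    assert 0 <= offset < 65536 and 0 < len(data) < 256
    body = [len(data), (offset >> 8) & 0xFF, offset & 0xFF, 0x00] + list(data)
    checksum = (-sum(body)) & 0xFF  # two's complement of the byte sum
    return ':' + ''.join('%02X' % b for b in body + [checksum])

print(hex_data_record(0x0010, [0xDE, 0xAD, 0xBE, 0xEF]))
# :04001000DEADBEEFB4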
def show_menu(self, status_icon, button, activate_time):
"""Show the tray icon menu.
"""
menu = self.get_widget('tray-menu')
    menu.popup(None, None, None, Gtk.StatusIcon.position_menu, button, activate_time) | Show the tray icon menu. | Below is the instruction that describes the task:
### Input:
Show the tray icon menu.
### Response:
def show_menu(self, status_icon, button, activate_time):
"""Show the tray icon menu.
"""
menu = self.get_widget('tray-menu')
menu.popup(None, None, None, Gtk.StatusIcon.position_menu, button, activate_time) |
def draw_hydrogen_bonds(self,color="black"):
"""For each bond that has been determined to be important, a line gets drawn.
"""
self.draw_hbonds=""
if self.hbonds!=None:
for bond in self.hbonds.hbonds_for_drawing:
x = str((self.molecule.x_dim-self.molecule.molsize1)/2)
y = str((self.molecule.y_dim-self.molecule.molsize2)/2)
self.draw_hbonds ="<g id='"+str(bond[0])+"' class='HBonds' transform='translate("+x+","+y+")' x='"+x+"' y='"+y+"'>'"
atom = self.topology_data.universe.atoms[bond[0]-1] #zero-based index vs one-based index
residue = (atom.resname, str(atom.resid), atom.segid)
if bond[2] in ["N","O","H"]:
#backbone interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
else:
#sidechain interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' stroke-dasharray='5,5' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
self.draw_hbonds+="</g>" | For each bond that has been determined to be important, a line gets drawn. | Below is the the instruction that describes the task:
### Input:
For each bond that has been determined to be important, a line gets drawn.
### Response:
def draw_hydrogen_bonds(self,color="black"):
"""For each bond that has been determined to be important, a line gets drawn.
"""
self.draw_hbonds=""
if self.hbonds!=None:
for bond in self.hbonds.hbonds_for_drawing:
x = str((self.molecule.x_dim-self.molecule.molsize1)/2)
y = str((self.molecule.y_dim-self.molecule.molsize2)/2)
self.draw_hbonds ="<g id='"+str(bond[0])+"' class='HBonds' transform='translate("+x+","+y+")' x='"+x+"' y='"+y+"'>'"
atom = self.topology_data.universe.atoms[bond[0]-1] #zero-based index vs one-based index
residue = (atom.resname, str(atom.resid), atom.segid)
if bond[2] in ["N","O","H"]:
#backbone interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
else:
#sidechain interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' stroke-dasharray='5,5' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
self.draw_hbonds+="</g>" |
def vrrp_config(app, interface, config):
"""create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure.
"""
config_request = vrrp_event.EventVRRPConfigRequest(interface, config)
config_request.sync = True
return app.send_request(config_request) | create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
    on failure. | Below is the instruction that describes the task:
### Input:
create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure.
### Response:
def vrrp_config(app, interface, config):
"""create an instance.
returns EventVRRPConfigReply(instance.name, interface, config)
on success.
returns EventVRRPConfigReply(None, interface, config)
on failure.
"""
config_request = vrrp_event.EventVRRPConfigRequest(interface, config)
config_request.sync = True
return app.send_request(config_request) |
def field_type_schema(
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['main.BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
definitions = {}
ref_prefix = ref_prefix or default_prefix
if field.shape is Shape.LIST:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
return {'type': 'array', 'items': f_schema}, definitions
elif field.shape is Shape.SET:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
return {'type': 'array', 'uniqueItems': True, 'items': f_schema}, definitions
elif field.shape is Shape.MAPPING:
dict_schema: Dict[str, Any] = {'type': 'object'}
key_field = cast(Field, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
if regex:
# Dict keys have a regex pattern
# f_schema might be a schema or empty dict, add it either way
dict_schema['patternProperties'] = {regex.pattern: f_schema}
elif f_schema:
# The dict values are not simply Any, so they need a schema
dict_schema['additionalProperties'] = f_schema
return dict_schema, definitions
elif field.shape is Shape.TUPLE:
sub_schema = []
sub_fields = cast(List[Field], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions = field_type_schema(
sf, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(sf_definitions)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
return {'type': 'array', 'items': sub_schema}, definitions
else:
assert field.shape is Shape.SINGLETON, field.shape
f_schema, f_definitions = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
definitions.update(f_definitions)
return f_schema, definitions | Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
    information as title, etc. Also return additional schema definitions, from sub-models. | Below is the instruction that describes the task:
### Input:
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
### Response:
def field_type_schema(
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['main.BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
definitions = {}
ref_prefix = ref_prefix or default_prefix
if field.shape is Shape.LIST:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
return {'type': 'array', 'items': f_schema}, definitions
elif field.shape is Shape.SET:
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
return {'type': 'array', 'uniqueItems': True, 'items': f_schema}, definitions
elif field.shape is Shape.MAPPING:
dict_schema: Dict[str, Any] = {'type': 'object'}
key_field = cast(Field, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
f_schema, f_definitions = field_singleton_schema(
field, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(f_definitions)
if regex:
# Dict keys have a regex pattern
# f_schema might be a schema or empty dict, add it either way
dict_schema['patternProperties'] = {regex.pattern: f_schema}
elif f_schema:
# The dict values are not simply Any, so they need a schema
dict_schema['additionalProperties'] = f_schema
return dict_schema, definitions
elif field.shape is Shape.TUPLE:
sub_schema = []
sub_fields = cast(List[Field], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions = field_type_schema(
sf, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix
)
definitions.update(sf_definitions)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
return {'type': 'array', 'items': sub_schema}, definitions
else:
assert field.shape is Shape.SINGLETON, field.shape
f_schema, f_definitions = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
definitions.update(f_definitions)
return f_schema, definitions |
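To see what these shapes become in an emitted schema, here is a hedged illustration using pydantic v1's public .schema() call (assuming a pydantic v1 install); the model and its field names are made up.

from typing import Dict, List, Tuple
from pydantic import BaseModel

class Payload(BaseModel):
    tags: List[str]          # Shape.LIST    -> {"type": "array", "items": ...}
    point: Tuple[int, int]   # Shape.TUPLE   -> {"type": "array", "items": [..., ...]}
    counts: Dict[str, int]   # Shape.MAPPING -> {"type": "object", "additionalProperties": ...}

print(Payload.schema()["properties"])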
def _delete(self, url, data, scope):
"""
Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
"""
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text | Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
            - `COMPLETION_PROVIDER_SCOPE` | Below is the instruction that describes the task:
### Input:
Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
### Response:
def _delete(self, url, data, scope):
"""
Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
"""
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text |
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.cli.add_command(cmd)
app.extensions['invenio-communities'] = self
# Register the jinja do extension
app.jinja_env.add_extension('jinja2.ext.do')
        self.register_signals(app) | Flask application initialization. | Below is the instruction that describes the task:
### Input:
Flask application initialization.
### Response:
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.cli.add_command(cmd)
app.extensions['invenio-communities'] = self
# Register the jinja do extension
app.jinja_env.add_extension('jinja2.ext.do')
self.register_signals(app) |
def _step_decorator_args(self, decorator):
"""
Get the arguments passed to step decorators
converted to python objects.
"""
args = decorator.children[3:-2]
step = None
if len(args) == 1:
try:
step = ast.literal_eval(args[0].get_code())
except (ValueError, SyntaxError):
pass
if isinstance(step, six.string_types+(list,)):
return step
logging.error("Decorator step accepts either a string or a list of strings - %s:%d",
self.file_path, decorator.start_pos[0])
else:
logging.error("Decorator step accepts only one argument - %s:%d",
self.file_path, decorator.start_pos[0]) | Get the arguments passed to step decorators
converted to python objects. | Below is the the instruction that describes the task:
### Input:
Get the arguments passed to step decorators
converted to python objects.
### Response:
def _step_decorator_args(self, decorator):
"""
Get the arguments passed to step decorators
converted to python objects.
"""
args = decorator.children[3:-2]
step = None
if len(args) == 1:
try:
step = ast.literal_eval(args[0].get_code())
except (ValueError, SyntaxError):
pass
if isinstance(step, six.string_types+(list,)):
return step
logging.error("Decorator step accepts either a string or a list of strings - %s:%d",
self.file_path, decorator.start_pos[0])
else:
logging.error("Decorator step accepts only one argument - %s:%d",
self.file_path, decorator.start_pos[0]) |
def hex_from(val):
"""Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
:rtype: bytes|str
"""
if isinstance(val, integer_types):
hex_str = '%x' % val
if len(hex_str) % 2:
hex_str = '0' + hex_str
return hex_str
return hexlify(val) | Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
    :rtype: bytes|str | Below is the instruction that describes the task:
### Input:
Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
:rtype: bytes|str
### Response:
def hex_from(val):
"""Returns hex string representation for a given value.
:param bytes|str|unicode|int|long val:
:rtype: bytes|str
"""
if isinstance(val, integer_types):
hex_str = '%x' % val
if len(hex_str) % 2:
hex_str = '0' + hex_str
return hex_str
return hexlify(val) |
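Expected behaviour on Python 3, assuming integer_types is (int,) and hexlify is binascii.hexlify in the surrounding module; the sample values are arbitrary.

print(hex_from(3500))         # '0dac'  -> zero-padded to an even number of hex digits
print(hex_from(b'\x01\xff'))  # b'01ff' -> byte strings go through hexlify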
def instantiate(self, params, auth=None):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
"""
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) | Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException | Below is the the instruction that describes the task:
### Input:
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
### Response:
def instantiate(self, params, auth=None):
"""
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:param auth: The auth client
:type params: dict
:type auth: :class:`carto.auth.APIKeyAuthClient`
:return:
:raise: CartoException
"""
try:
endpoint = (self.Meta.collection_endpoint
+ "{template_id}"). \
format(template_id=self.template_id)
if (auth is not None):
endpoint = (endpoint + "?auth_token={auth_token}"). \
format(auth_token=auth)
self.send(endpoint, "POST", json=params)
except CartoRateLimitException as e:
raise e
except Exception as e:
raise CartoException(e) |
def require(self, key: str) -> str:
"""
Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist.
"""
v = self.get(key)
if v is None:
raise ConfigMissingError(self.full_key(key))
return v | Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist. | Below is the the instruction that describes the task:
### Input:
Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist.
### Response:
def require(self, key: str) -> str:
"""
Returns a configuration value by its given key. If it doesn't exist, an error is thrown.
:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist.
"""
v = self.get(key)
if v is None:
raise ConfigMissingError(self.full_key(key))
return v |
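A hypothetical call for the record above, assuming config is an instance of the (unshown) class that defines require and get.
db_url = config.require('database_url')  # returns the value, or raises ConfigMissingError if unset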
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
        # since we've not found a good candidate, fall back to using the whole document
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment) | If we fail to find a good candidate we need to find something else. | Below is the the instruction that describes the task:
### Input:
If we fail to find a good candidate we need to find something else.
### Response:
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
        # since we've not found a good candidate, fall back to using the whole document
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment) |
def run(self):
""" Run the command with supplied configuration """
device = self.model_config.torch_device()
learner = Learner(device, self.model_factory.instantiate())
# All callbacks useful for learning
callbacks = self.gather_callbacks()
# Metrics to track through this training
metrics = learner.metrics()
# Check if training was already started and potentially continue where we left off
training_info, hidden_state = self.resume_training(learner, callbacks, metrics)
# Prepare current training phase
current_phase_idx = self._select_phase_left_bound(training_info.start_epoch_idx)
current_phase = self.phases[current_phase_idx]
local_idx = training_info.start_epoch_idx - self.ladder[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
if training_info.start_epoch_idx > 0:
current_phase.restore(training_info, local_idx, learner.model, hidden_state)
training_info.on_train_begin()
for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.full_number_of_epochs + 1):
iteration_phase_idx = self._select_phase_right_bound(global_epoch_idx-1)
local_idx = global_epoch_idx - self.ladder[iteration_phase_idx]
# Phase preparations
while current_phase_idx != iteration_phase_idx:
current_phase.tear_down_phase(training_info, learner.model)
current_phase_idx += 1
current_phase = self.phases[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
# Create epoch info
epoch_info = current_phase.epoch_info(training_info, global_epoch_idx, local_idx)
# Execute learning
current_phase.execute_epoch(epoch_info, learner)
# Epoch checkpoint
self.storage.checkpoint(epoch_info, learner.model)
# Tear down the last phase
if current_phase is not None:
current_phase.tear_down_phase(training_info, learner.model)
training_info.on_train_end()
return training_info | Run the command with supplied configuration | Below is the the instruction that describes the task:
### Input:
Run the command with supplied configuration
### Response:
def run(self):
""" Run the command with supplied configuration """
device = self.model_config.torch_device()
learner = Learner(device, self.model_factory.instantiate())
# All callbacks useful for learning
callbacks = self.gather_callbacks()
# Metrics to track through this training
metrics = learner.metrics()
# Check if training was already started and potentially continue where we left off
training_info, hidden_state = self.resume_training(learner, callbacks, metrics)
# Prepare current training phase
current_phase_idx = self._select_phase_left_bound(training_info.start_epoch_idx)
current_phase = self.phases[current_phase_idx]
local_idx = training_info.start_epoch_idx - self.ladder[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
if training_info.start_epoch_idx > 0:
current_phase.restore(training_info, local_idx, learner.model, hidden_state)
training_info.on_train_begin()
for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.full_number_of_epochs + 1):
iteration_phase_idx = self._select_phase_right_bound(global_epoch_idx-1)
local_idx = global_epoch_idx - self.ladder[iteration_phase_idx]
# Phase preparations
while current_phase_idx != iteration_phase_idx:
current_phase.tear_down_phase(training_info, learner.model)
current_phase_idx += 1
current_phase = self.phases[current_phase_idx]
current_phase.set_up_phase(training_info, learner.model, self.source)
print(current_phase.banner())
# Create epoch info
epoch_info = current_phase.epoch_info(training_info, global_epoch_idx, local_idx)
# Execute learning
current_phase.execute_epoch(epoch_info, learner)
# Epoch checkpoint
self.storage.checkpoint(epoch_info, learner.model)
# Tear down the last phase
if current_phase is not None:
current_phase.tear_down_phase(training_info, learner.model)
training_info.on_train_end()
return training_info |
def cmap(rgbin, N=256):
'''Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
    :param N=256: The number of levels to be interpolated to.
'''
# rgb inputs here
if not isinstance(rgbin[0], _string_types):
# normalize to be out of 1 if out of 256 instead
if rgbin.max() > 1:
rgbin = rgbin/256.
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', rgbin, N=N)
return cmap | Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
### Input:
Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
### Response:
def cmap(rgbin, N=256):
'''Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
    :param N=256: The number of levels to be interpolated to.
'''
# rgb inputs here
if not isinstance(rgbin[0], _string_types):
# normalize to be out of 1 if out of 256 instead
if rgbin.max() > 1:
rgbin = rgbin/256.
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', rgbin, N=N)
return cmap |
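An illustrative call for the record above; it assumes numpy plus the matplotlib import used inside the function, and the colour triplets are made up.
import numpy as np
rgb = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [1.0, 1.0, 1.0]])  # black -> red -> white
my_cmap = cmap(rgb, N=128)         # LinearSegmentedColormap with 128 levels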
def add_io_hook(self, hook):
"""
Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame.
"""
def proxy(*args):
hook(*args)
self._io_hooks.append(proxy)
return self.HookRemover(lambda: self._io_hooks.remove(proxy)) | Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame. | Below is the the instruction that describes the task:
### Input:
Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame.
### Response:
def add_io_hook(self, hook):
"""
Args:
hook: This hook will be invoked for every incoming and outgoing CAN frame.
Hook arguments: (direction, frame)
See FRAME_DIRECTION_*, CANFrame.
"""
def proxy(*args):
hook(*args)
self._io_hooks.append(proxy)
return self.HookRemover(lambda: self._io_hooks.remove(proxy)) |
def to_dataset(self, dim=None, name=None):
"""Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset
"""
if dim is not None and dim not in self.dims:
warnings.warn('the order of the arguments on DataArray.to_dataset '
'has changed; you now need to supply ``name`` as '
'a keyword argument',
FutureWarning, stacklevel=2)
name = dim
dim = None
if dim is not None:
if name is not None:
raise TypeError('cannot supply both dim and name arguments')
return self._to_dataset_split(dim)
else:
return self._to_dataset_whole(name) | Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset | Below is the the instruction that describes the task:
### Input:
Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset
### Response:
def to_dataset(self, dim=None, name=None):
"""Convert a DataArray to a Dataset.
Parameters
----------
dim : str, optional
Name of the dimension on this array along which to split this array
into separate variables. If not provided, this array is converted
into a Dataset of one variable.
name : str, optional
Name to substitute for this array's name. Only valid if ``dim`` is
not provided.
Returns
-------
dataset : Dataset
"""
if dim is not None and dim not in self.dims:
warnings.warn('the order of the arguments on DataArray.to_dataset '
'has changed; you now need to supply ``name`` as '
'a keyword argument',
FutureWarning, stacklevel=2)
name = dim
dim = None
if dim is not None:
if name is not None:
raise TypeError('cannot supply both dim and name arguments')
return self._to_dataset_split(dim)
else:
return self._to_dataset_whole(name) |
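A short xarray usage sketch for the record above; the array shape, names and coordinates are made-up examples.
import numpy as np
import xarray as xr
arr = xr.DataArray(np.zeros((2, 3)), dims=('x', 'y'),
                   coords={'x': ['a', 'b']}, name='temperature')
ds_whole = arr.to_dataset()          # one variable named 'temperature'
ds_split = arr.to_dataset(dim='x')   # variables 'a' and 'b', each with dim 'y'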
def is_broker_action_done(action, rid=None, unit=None):
"""Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False
"""
rdata = relation_get(rid, unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return False
rsp = CephBrokerRsp(broker_rsp)
unit_name = local_unit().partition('/')[2]
key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
kvstore = kv()
val = kvstore.get(key=key)
if val and val == rsp.request_id:
return True
return False | Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False | Below is the the instruction that describes the task:
### Input:
Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False
### Response:
def is_broker_action_done(action, rid=None, unit=None):
"""Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False
"""
rdata = relation_get(rid, unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return False
rsp = CephBrokerRsp(broker_rsp)
unit_name = local_unit().partition('/')[2]
key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
kvstore = kv()
val = kvstore.get(key=key)
if val and val == rsp.request_id:
return True
return False |
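A hypothetical check from inside a charm relation hook; the action name and relation identifiers are assumptions.
if not is_broker_action_done('create_pool', rid='ceph:1', unit='ceph-mon/0'):
    print('ceph broker request still pending')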
def from_base62(s):
"""
Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer)
"""
result = 0
for c in s:
if c not in BASE62_MAP:
            raise Exception('Invalid base62 string: %s' % s)
result = result * 62 + BASE62_MAP.index(c)
return result | Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer) | Below is the the instruction that describes the task:
### Input:
Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer)
### Response:
def from_base62(s):
"""
Convert a base62 String back into a number
:param s: The base62 encoded String
:return: The number encoded in the String (integer)
"""
result = 0
for c in s:
if c not in BASE62_MAP:
            raise Exception('Invalid base62 string: %s' % s)
result = result * 62 + BASE62_MAP.index(c)
return result |
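Illustrative calls for the record above, assuming BASE62_MAP is the conventional 0-9A-Za-z alphabet.
print(from_base62('10'))  # 62
print(from_base62('zz'))  # 3843 (61 * 62 + 61)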
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode) | Please have a look at the function description/documentation in the V-REP user manual | Below is the the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode) |
def trunk_angles(nrn, neurite_type=NeuriteType.all):
'''Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted from the y axis and anticlock-wise.
'''
vectors = trunk_vectors(nrn, neurite_type=neurite_type)
# In order to avoid the failure of the process in case the neurite_type does not exist
if not vectors.size:
return []
def _sort_angle(p1, p2):
"""Angle between p1-p2 to sort vectors"""
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang1 - ang2)
# Sorting angles according to x-y plane
order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1])
for i in vectors[:, 0:2]]))
ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]]
return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1])
for i, _ in enumerate(ordered_vectors)] | Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted from the y axis and anticlock-wise. | Below is the the instruction that describes the task:
### Input:
Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted from the y axis and anticlock-wise.
### Response:
def trunk_angles(nrn, neurite_type=NeuriteType.all):
'''Calculates the angles between all the trunks of the neuron.
The angles are defined on the x-y plane and the trees
are sorted from the y axis and anticlock-wise.
'''
vectors = trunk_vectors(nrn, neurite_type=neurite_type)
# In order to avoid the failure of the process in case the neurite_type does not exist
if not vectors.size:
return []
def _sort_angle(p1, p2):
"""Angle between p1-p2 to sort vectors"""
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang1 - ang2)
# Sorting angles according to x-y plane
order = np.argsort(np.array([_sort_angle(i / np.linalg.norm(i), [0, 1])
for i in vectors[:, 0:2]]))
ordered_vectors = vectors[order][:, [COLS.X, COLS.Y]]
return [morphmath.angle_between_vectors(ordered_vectors[i], ordered_vectors[i - 1])
for i, _ in enumerate(ordered_vectors)] |
def operations(*operations):
'''Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
    - It transfers onto itself docstring and annotations from the decorated
method, so as to be "transparent" with regards to introspection.
    - It transforms the method so as to make it a classmethod.
- It invokes the method within a try-except condition, so as to
intercept and populate the Fail(<code>) conditions.'''
def decorator(method):
def wrapper(cls, request, start_response, **kwargs):
result_cache = []
try:
yield from method(cls, request, **kwargs)
except Respond as e:
# Inject messages as taken from signature
status = e.status
msg = utils.parse_return_annotation(method)[status]['message']
                if status // 100 == 2:  # All 2xx HTTP codes
e.description = msg
raise e
else: # HTTP Errors --> use werkzeug exceptions
raise CODES_TO_EXCEPTIONS[status](msg)
# Add operation-specific attributes to the method.
method.swagger_ops = operations
method.signature = inspect.signature(method)
method.source = inspect.getsource(method)
method.path_vars = utils.extract_pathvars(method)
# "Backport" the method introspective attributes to the wrapper.
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
wrapper.__annotations__ = method.__annotations__
wrapper.swagger_ops = method.swagger_ops
wrapper.signature = method.signature
wrapper.source = method.source
wrapper.path_vars = method.path_vars
return classmethod(wrapper)
return decorator | Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
- It transfers onto itself docstring and annotations from the decorated
method, so as to be "transparent" with regards to introspection.
- It transforms the method so as to make it a classmethod.
- It invokes the method within a try-except condition, so as to
intercept and populate the Fail(<code>) conditions. | Below is the the instruction that describes the task:
### Input:
Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
- It transfers onto itself docstring and annotations from the decorated
method, so as to be "transparent" with regards to introspection.
- It transforms the method so as to make it a classmethod.
- It invokes the method within a try-except condition, so as to
intercept and populate the Fail(<code>) conditions.
### Response:
def operations(*operations):
'''Decorator for marking Resource methods as HTTP operations.
This decorator does a number of different things:
    - It transfers onto itself docstring and annotations from the decorated
method, so as to be "transparent" with regards to introspection.
    - It transforms the method so as to make it a classmethod.
- It invokes the method within a try-except condition, so as to
intercept and populate the Fail(<code>) conditions.'''
def decorator(method):
def wrapper(cls, request, start_response, **kwargs):
result_cache = []
try:
yield from method(cls, request, **kwargs)
except Respond as e:
# Inject messages as taken from signature
status = e.status
msg = utils.parse_return_annotation(method)[status]['message']
                if status // 100 == 2:  # All 2xx HTTP codes
e.description = msg
raise e
else: # HTTP Errors --> use werkzeug exceptions
raise CODES_TO_EXCEPTIONS[status](msg)
# Add operation-specific attributes to the method.
method.swagger_ops = operations
method.signature = inspect.signature(method)
method.source = inspect.getsource(method)
method.path_vars = utils.extract_pathvars(method)
# "Backport" the method introspective attributes to the wrapper.
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
wrapper.__annotations__ = method.__annotations__
wrapper.swagger_ops = method.swagger_ops
wrapper.signature = method.signature
wrapper.source = method.source
wrapper.path_vars = method.path_vars
return classmethod(wrapper)
return decorator |
def permission_required(perm, fn=None, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
# Normalize to a list of permissions
if isinstance(perm, six.string_types):
perms = (perm,)
else:
perms = perm
# Get the object to check permissions against
if callable(fn):
obj = fn(request, *args, **kwargs)
else: # pragma: no cover
obj = fn
# Get the user
user = request.user
# Check for permissions and return a response
if not user.has_perms(perms, obj):
# User does not have a required permission
if raise_exception:
raise PermissionDenied()
else:
return _redirect_to_login(request, view_func.__name__,
login_url, redirect_field_name)
else:
# User has all required permissions -- allow the view to execute
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator | View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used. | Below is the the instruction that describes the task:
### Input:
View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used.
### Response:
def permission_required(perm, fn=None, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
View decorator that checks for the given permissions before allowing the
view to execute. Use it like this::
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
``perm`` is either a permission name as a string, or a list of permission
names.
``fn`` is an optional callback that receives the same arguments as those
passed to the decorated view and must return the object to check
permissions against. If omitted, the decorator behaves just like Django's
``permission_required`` decorator, i.e. checks for model-level permissions.
``raise_exception`` is a boolean specifying whether to raise a
``django.core.exceptions.PermissionDenied`` exception if the check fails.
You will most likely want to set this argument to ``True`` if you have
specified a custom 403 response handler in your urlconf. If ``False``,
the user will be redirected to the URL specified by ``login_url``.
``login_url`` is an optional custom URL to redirect the user to if
permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
used.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
# Normalize to a list of permissions
if isinstance(perm, six.string_types):
perms = (perm,)
else:
perms = perm
# Get the object to check permissions against
if callable(fn):
obj = fn(request, *args, **kwargs)
else: # pragma: no cover
obj = fn
# Get the user
user = request.user
# Check for permissions and return a response
if not user.has_perms(perms, obj):
# User does not have a required permission
if raise_exception:
raise PermissionDenied()
else:
return _redirect_to_login(request, view_func.__name__,
login_url, redirect_field_name)
else:
# User has all required permissions -- allow the view to execute
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator |
def get_id_head(self):
'''
Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target
'''
id_head = None
for target_node in self:
if target_node.is_head():
id_head = target_node.get_id()
break
return id_head | Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target | Below is the the instruction that describes the task:
### Input:
Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target
### Response:
def get_id_head(self):
'''
Returns the id of the target that is set as "head"
@rtype: string
@return: the target id (or None) of the head target
'''
id_head = None
for target_node in self:
if target_node.is_head():
id_head = target_node.get_id()
break
return id_head |
def parse_parameters(self, parameters):
"""Parses and sets parameters in the model."""
self.parameters = []
for param_name, param_value in parameters.items():
p = Parameter(param_name, param_value)
if p:
self.parameters.append(p) | Parses and sets parameters in the model. | Below is the the instruction that describes the task:
### Input:
Parses and sets parameters in the model.
### Response:
def parse_parameters(self, parameters):
"""Parses and sets parameters in the model."""
self.parameters = []
for param_name, param_value in parameters.items():
p = Parameter(param_name, param_value)
if p:
self.parameters.append(p) |
def cmode(self, channel, modes=''):
"""
Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified return the modes of the channel.
"""
with self.lock:
self.is_in_channel(channel)
if not modes:
self.send('MODE %s' % channel)
modes = ''
mode_set_time = None
while self.readable():
msg = self._recv(rm_colon=True, \
expected_replies=('324', '329'))
if msg[0] == '324':
modes = msg[2].split()[1].replace('+', '', 1)
elif msg[0] == '329':
mode_set_time = self._m_time.localtime( \
int(msg[2].split()[1]))
return modes, mode_set_time
else:
self.send('MODE %s %s' % (channel, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',), \
ignore_unexpected_replies=True)
if msg[0]:
mode = msg[2]
self.parse_cmode_string(mode, msg[1])
if not self.hide_called_events:
self.stepback() | Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified return the modes of the channel. | Below is the the instruction that describes the task:
### Input:
Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified return the modes of the channel.
### Response:
def cmode(self, channel, modes=''):
"""
Sets or gets the channel mode.
Required arguments:
* channel - Channel to set/get modes of.
Optional arguments:
* modes='' - Modes to set.
If not specified return the modes of the channel.
"""
with self.lock:
self.is_in_channel(channel)
if not modes:
self.send('MODE %s' % channel)
modes = ''
mode_set_time = None
while self.readable():
msg = self._recv(rm_colon=True, \
expected_replies=('324', '329'))
if msg[0] == '324':
modes = msg[2].split()[1].replace('+', '', 1)
elif msg[0] == '329':
mode_set_time = self._m_time.localtime( \
int(msg[2].split()[1]))
return modes, mode_set_time
else:
self.send('MODE %s %s' % (channel, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',), \
ignore_unexpected_replies=True)
if msg[0]:
mode = msg[2]
self.parse_cmode_string(mode, msg[1])
if not self.hide_called_events:
self.stepback() |
def kwargs_only(func):
"""
Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default):
"""
if hasattr(inspect, 'signature'): # pragma: no cover
# Python 3
signature = inspect.signature(func)
first_arg_name = list(signature.parameters.keys())[0]
else: # pragma: no cover
# Python 2
signature = inspect.getargspec(func)
first_arg_name = signature.args[0]
if first_arg_name in ('self', 'cls'):
allowable_args = 1
else:
allowable_args = 0
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > allowable_args:
raise TypeError("{} should only be called with keyword args".format(func.__name__))
return func(*args, **kwargs)
return wrapper | Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default): | Below is the the instruction that describes the task:
### Input:
Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default):
### Response:
def kwargs_only(func):
"""
Make a function only accept keyword arguments.
This can be dropped in Python 3 in lieu of:
def foo(*, bar=default):
"""
if hasattr(inspect, 'signature'): # pragma: no cover
# Python 3
signature = inspect.signature(func)
first_arg_name = list(signature.parameters.keys())[0]
else: # pragma: no cover
# Python 2
signature = inspect.getargspec(func)
first_arg_name = signature.args[0]
if first_arg_name in ('self', 'cls'):
allowable_args = 1
else:
allowable_args = 0
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > allowable_args:
raise TypeError("{} should only be called with keyword args".format(func.__name__))
return func(*args, **kwargs)
return wrapper |
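An illustrative use of the decorator above; the function and argument names are made up.
@kwargs_only
def connect(host='localhost', port=5432):
    return host, port
connect(port=5433)            # fine: keyword-only call
# connect('localhost', 5433)  # would raise TypeError: connect should only be called with keyword args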
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title) | Helper function to generate new xref node based on
current environment. | Below is the the instruction that describes the task:
### Input:
Helper function to generate new xref node based on
current environment.
### Response:
def _make_module_refnode(self, builder, fromdocname, name, contnode):
"""Helper function to generate new xref node based on
current environment.
"""
# Get additional info for modules.
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title) |
def _build_pyramid(self, image, levels):
""" Returns a list of reduced-size images, from smallest to original size """
pyramid = [image]
for l in range(levels-1):
if any(x < 20 for x in pyramid[-1].shape[:2]):
break
pyramid.append(cv2.pyrDown(pyramid[-1]))
return list(reversed(pyramid)) | Returns a list of reduced-size images, from smallest to original size | Below is the the instruction that describes the task:
### Input:
Returns a list of reduced-size images, from smallest to original size
### Response:
def _build_pyramid(self, image, levels):
""" Returns a list of reduced-size images, from smallest to original size """
pyramid = [image]
for l in range(levels-1):
if any(x < 20 for x in pyramid[-1].shape[:2]):
break
pyramid.append(cv2.pyrDown(pyramid[-1]))
return list(reversed(pyramid)) |
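A rough usage sketch; tracker stands in for an instance of the (unshown) class defining _build_pyramid, and the blank frame is a placeholder.
import numpy as np
frame = np.zeros((480, 640, 3), dtype=np.uint8)
pyramid = tracker._build_pyramid(frame, levels=3)
# pyramid[0] is the smallest image, pyramid[-1] the original frame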
def make_plot(self):
"""Generate the plot from time series and arguments
"""
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca(xscale='auto-gps')
# handle user specified plot labels
if self.args.legend:
nlegargs = len(self.args.legend[0])
else:
nlegargs = 0
if nlegargs > 0 and nlegargs != self.n_datasets:
warnings.warn('The number of legends specified must match '
'the number of time series'
' (channels * start times). '
'There are {:d} series and {:d} legends'.format(
len(self.timeseries), len(self.args.legend)))
nlegargs = 0 # don't use them
for i in range(0, self.n_datasets):
series = self.timeseries[i]
if nlegargs:
label = self.args.legend[0][i]
else:
label = series.channel.name
if self.usetex:
label = label_to_latex(label)
ax.plot(series, label=label)
return plot | Generate the plot from time series and arguments | Below is the the instruction that describes the task:
### Input:
Generate the plot from time series and arguments
### Response:
def make_plot(self):
"""Generate the plot from time series and arguments
"""
plot = Plot(figsize=self.figsize, dpi=self.dpi)
ax = plot.gca(xscale='auto-gps')
# handle user specified plot labels
if self.args.legend:
nlegargs = len(self.args.legend[0])
else:
nlegargs = 0
if nlegargs > 0 and nlegargs != self.n_datasets:
warnings.warn('The number of legends specified must match '
'the number of time series'
' (channels * start times). '
'There are {:d} series and {:d} legends'.format(
len(self.timeseries), len(self.args.legend)))
nlegargs = 0 # don't use them
for i in range(0, self.n_datasets):
series = self.timeseries[i]
if nlegargs:
label = self.args.legend[0][i]
else:
label = series.channel.name
if self.usetex:
label = label_to_latex(label)
ax.plot(series, label=label)
return plot |
def update_hosting_device_status(self, context, host, status_info):
"""Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
}
"""
for status, hd_ids in six.iteritems(status_info):
# update hosting device entry in db to new status
hd_spec = {'hosting_device': {'status': status}}
for hd_id in hd_ids:
self._dmplugin.update_hosting_device(context, hd_id, hd_spec)
if status == const.HD_DEAD or status == const.HD_ERROR:
self._dmplugin.handle_non_responding_hosting_devices(
context, host, hd_ids) | Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
} | Below is the the instruction that describes the task:
### Input:
Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
}
### Response:
def update_hosting_device_status(self, context, host, status_info):
"""Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
}
"""
for status, hd_ids in six.iteritems(status_info):
# update hosting device entry in db to new status
hd_spec = {'hosting_device': {'status': status}}
for hd_id in hd_ids:
self._dmplugin.update_hosting_device(context, hd_id, hd_spec)
if status == const.HD_DEAD or status == const.HD_ERROR:
self._dmplugin.handle_non_responding_hosting_devices(
context, host, hd_ids) |
def _send_command(self, command, raw_text=False):
"""
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
"""
return self.device.show(command, raw_text=raw_text) | Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH. | Below is the the instruction that describes the task:
### Input:
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
### Response:
def _send_command(self, command, raw_text=False):
"""
Wrapper for NX-API show method.
Allows more code sharing between NX-API and SSH.
"""
return self.device.show(command, raw_text=raw_text) |
def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum))) | Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation. | Below is the the instruction that describes the task:
### Input:
Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
### Response:
def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum))) |
def resolves_for(self, session):
"""
Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves.
"""
if self.url:
self.actual_path = session.current_url
else:
result = urlparse(session.current_url)
if self.only_path:
self.actual_path = result.path
else:
request_uri = result.path
if result.query:
request_uri += "?{0}".format(result.query)
self.actual_path = request_uri
if isregex(self.expected_path):
return self.expected_path.search(self.actual_path)
else:
return normalize_url(self.actual_path) == normalize_url(self.expected_path) | Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves. | Below is the the instruction that describes the task:
### Input:
Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves.
### Response:
def resolves_for(self, session):
"""
Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves.
"""
if self.url:
self.actual_path = session.current_url
else:
result = urlparse(session.current_url)
if self.only_path:
self.actual_path = result.path
else:
request_uri = result.path
if result.query:
request_uri += "?{0}".format(result.query)
self.actual_path = request_uri
if isregex(self.expected_path):
return self.expected_path.search(self.actual_path)
else:
return normalize_url(self.actual_path) == normalize_url(self.expected_path) |
def _move_agent(self, agent, direction, wrap_allowed=True):
"""
moves agent 'agent' in 'direction'
"""
x,y = agent.coords['x'], agent.coords['y']
print('moving agent ', agent.name, 'to x,y=', direction, 'wrap_allowed = ', wrap_allowed)
agent.coords['x'] = x + direction[0]
agent.coords['y'] = y + direction[1] | moves agent 'agent' in 'direction' | Below is the the instruction that describes the task:
### Input:
moves agent 'agent' in 'direction'
### Response:
def _move_agent(self, agent, direction, wrap_allowed=True):
"""
moves agent 'agent' in 'direction'
"""
x,y = agent.coords['x'], agent.coords['y']
print('moving agent ', agent.name, 'to x,y=', direction, 'wrap_allowed = ', wrap_allowed)
agent.coords['x'] = x + direction[0]
agent.coords['y'] = y + direction[1] |
def peer_store(key, value, relation_name='cluster'):
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name)) | Store the key/value pair on the named peer relation `relation_name`. | Below is the the instruction that describes the task:
### Input:
Store the key/value pair on the named peer relation `relation_name`.
### Response:
def peer_store(key, value, relation_name='cluster'):
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name)) |
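A hypothetical call from a charm hook using the helper above; the key and value are made up.
peer_store('leader-ip', '10.0.0.12', relation_name='cluster')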