text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def annotate(self, framedata):
    """Annotate the processed axes with the annotations for this frame.

    Removes the artists drawn for the previous frame, then draws every
    annotation whose frame number (``annotation[2]``) equals ``framedata``.
    Assumes ``self.annotations`` is sorted by frame number, since the scan
    stops at the first annotation scheduled after ``framedata``.

    Args:
        framedata: The current frame number.
    """
    for artist in self.annotation_artists:
        artist.remove()
    self.annotation_artists = []
    for annotation in self.annotations:
        if annotation[2] > framedata:
            return
        if annotation[2] == framedata:
            pos = annotation[0:2]
            shape = self.annotations_default['shape']
            color = self.annotations_default['color']
            size = self.annotations_default['size']
            line = self.annotations_default['line']
            if len(annotation) > 3:
                # Per-annotation overrides of the defaults.
                shape = annotation[3].get('shape', shape)
                color = annotation[3].get('color', color)
                size = annotation[3].get('size', size)
                line = annotation[3].get('line', line)
            if shape == 'CIRC' and hasattr(size, '__len__'):
                # Circles need a scalar radius; fall back to a default.
                size = 30
            if not hasattr(color, '__len__'):
                # Expand a scalar gray value into an (r, g, b) tuple.
                color = (color,) * 3
            if shape == 'RECT':
                patch = patches.Rectangle((pos[0] - size[0] // 2,
                                           pos[1] - size[1] // 2),
                                          size[0], size[1], fill=False,
                                          lw=line, fc='none', ec=color)
            elif shape == 'CIRC':
                patch = patches.CirclePolygon(pos, radius=size, fc='none',
                                              ec=color, lw=line)
            else:
                # BUG FIX: an unknown shape previously left `patch`
                # unbound and raised NameError below; skip it instead.
                continue
            self.annotation_artists.append(patch)
            self.axes_processed.add_artist(self.annotation_artists[-1])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _draw_frame(self, framedata):
    """Read, process and draw the next frame.

    If needed for color maps, conversions to gray scale are performed.
    In case the images are no color images and no custom color maps are
    defined, the colormap `gray` is applied. This function is called by
    TimedAnimation.

    Args:
        framedata: The frame data (current frame number).
    """
    original = self.read_frame()
    if original is None:
        # Source exhausted: report completion and stop drawing.
        self.update_info(self.info_string(message='Finished.',
                                          frame=framedata))
        return
    if self.original is not None:
        # Source axes shown too: process a copy so the displayed
        # original stays untouched.
        processed = self.process_frame(original.copy())
        if self.cmap_original is not None:
            original = to_gray(original)
        elif not is_color_image(original):
            self.original.set_cmap('gray')
        self.original.set_data(original)
    else:
        processed = self.process_frame(original)
    if self.cmap_processed is not None:
        processed = to_gray(processed)
    elif not is_color_image(processed):
        self.processed.set_cmap('gray')
    if self.annotations:
        self.annotate(framedata)
    self.processed.set_data(processed)
    self.update_info(self.info_string(frame=framedata))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_info(self, custom=None):
    """Refresh the figure's suptitle.

    Args:
        custom: If not None, use this string instead of the result of
            self.info_string().
    """
    title = custom if custom is not None else self.info_string()
    self.figure.suptitle(title)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info_string(self, size=None, message='', frame=-1):
    """Build a short status line describing the stream.

    Combines size, frame number and an optional message, omitting any
    part that is unset (empty message, frame -1, unknown size). Used to
    update the plot figure's suptitle.

    Returns:
        The assembled info string.
    """
    parts = []
    dimensions = size if size is not None else self.size
    if dimensions is not None:
        parts.append('Size: {1}x{0}'.format(*dimensions))
    if frame >= 0:
        parts.append('Frame: {}'.format(frame))
    if message != '':
        parts.append('{}'.format(message))
    return ' '.join(parts)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _slice_required_len(slice_obj):
""" Calculate how many items must be in the collection to satisfy this slice returns `None` for slices may vary based on the length of the underlying collection such as `lst[-1]` or `lst[::]` """ |
if slice_obj.step and slice_obj.step != 1:
return None
# (None, None, *) requires the entire list
if slice_obj.start is None and slice_obj.stop is None:
return None
# Negative indexes are hard without knowing the collection length
if slice_obj.start and slice_obj.start < 0:
return None
if slice_obj.stop and slice_obj.stop < 0:
return None
if slice_obj.stop:
if slice_obj.start and slice_obj.start > slice_obj.stop:
return 0
return slice_obj.stop
return slice_obj.start + 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stylize(text, styles, reset=True):
    """Wrap `text` in the given ANSI styles, resetting codes at its end
    unless `reset` is False."""
    suffix = attr("reset") if reset else ""
    prefix = "".join(styles)
    return "{}{}{}".format(prefix, text, suffix)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def attribute(self):
    """Return the ANSI escape sequence that sets or resets a text
    attribute, looked up by name or by numeric code."""
    # Attribute name -> numeric code; each entry is also reachable
    # by its integer code.
    codes = {
        "bold": "1", "dim": "2", "underlined": "4", "blink": "5",
        "reverse": "7", "hidden": "8", "reset": "0",
        "res_bold": "21", "res_dim": "22", "res_underlined": "24",
        "res_blink": "25", "res_reverse": "27", "res_hidden": "28",
    }
    paint = {}
    for name, number in codes.items():
        sequence = self.ESC + number + self.END
        paint[name] = sequence
        paint[int(number)] = sequence
    return paint[self.color]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def foreground(self):
    """Return the ANSI escape sequence for a 256-color foreground."""
    prefix = self.ESC + "38;5;"
    key = self.color
    if str(key).isdigit():
        # Numeric code: translate it back to a color name first.
        self.reverse_dict()
        name = self.reserve_paint[str(key)]
        return prefix + self.paint[name] + self.END
    if key.startswith("#"):
        # Hex color: use the pre-computed nearest 256-color code.
        return prefix + str(self.HEX) + self.END
    return prefix + self.paint[key] + self.END
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset(self, required=False):
    """
    Perform a bus reset and check for a presence pulse.

    :param bool required: require presence pulse (raise when absent)
    """
    no_presence = self._ow.reset()
    if required and no_presence:
        raise OneWireError("No presence pulse found. Check devices and wiring.")
    return not no_presence
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan(self):
    """Scan for devices on the bus and return a list of addresses."""
    found = []
    last_diff = 65
    rom = False
    for _ in range(0xff):
        rom, last_diff = self._search_rom(rom, last_diff)
        if rom:
            # Guard against runaway searches before recording the hit.
            if len(found) + 1 > self.maximum_devices:
                raise RuntimeError(
                    "Maximum device count of {} exceeded."\
                    .format(self.maximum_devices))
            found.append(OneWireAddress(rom))
        if last_diff == 0:
            # No more discrepancy bits: the search is complete.
            break
    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def crc8(data):
    """
    Compute the Dallas/Maxim 1-Wire CRC-8 of the provided data.

    :param bytearray data: 8 byte array representing 64 bit ROM code
    """
    crc = 0
    for value in data:
        crc ^= value
        for _ in range(8):
            lsb_set = crc & 0x01
            crc >>= 1
            if lsb_set:
                # Polynomial x^8 + x^5 + x^4 + 1, reflected.
                crc ^= 0x8C
        crc &= 0xFF
    return crc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preferences_class_prepared(sender, *args, **kwargs):
    """
    Adds various preferences members to preferences.preferences,
    thus enabling easy access from code.
    """
    model = sender
    if not issubclass(model, Preferences):
        return
    # Every Preferences subclass gets a singleton manager.
    model.add_to_class('singleton', SingletonManager())
    # Expose the singleton instance as a property named after the model.
    accessor = property(lambda _self: model.singleton.get())
    setattr(preferences.Preferences, model._meta.object_name, accessor)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def site_cleanup(sender, action, instance, **kwargs):
    """
    Make sure there is only a single preferences object per site.
    So remove sites from pre-existing preferences objects.
    """
    if action != 'post_add':
        return
    if not (isinstance(instance, Preferences)
            and hasattr(instance.__class__, 'objects')):
        return
    own_sites = instance.sites.all()
    conflicts = instance.__class__.objects.filter(
        sites__in=own_sites).only('id').distinct()
    for other in conflicts:
        if other.id == instance.id:
            continue
        # Strip our sites from the conflicting preferences object.
        for site in own_sites:
            other.sites.remove(site)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
    """
    Return the first preferences object for the current site.
    If preferences do not exist create it.
    """
    queryset = super(SingletonManager, self).get_queryset()
    # Resolve the current site when the project configures one.
    site = None
    if getattr(settings, 'SITE_ID', None) is not None:
        site = Site.objects.get_current()
    # If site found limit queryset to site.
    if site is not None:
        queryset = queryset.filter(sites=settings.SITE_ID)
    if not queryset.exists():
        # Create object (for current site) if it doesn't exist.
        created = self.model.objects.create()
        if site is not None:
            created.sites.add(site)
    return queryset
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_iterable(self, iterable, session=None):
    '''Decode every element of ``iterable`` via the :meth:`loads` method.

    :param iterable: an iterable over data to load.
    :param session: Optional :class:`stdnet.odm.Session` (unused here).
    :return: a list of decoded data.
    '''
    return [self.loads(item) for item in iterable]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _search(self, words, include=None, exclude=None, lookup=None):
    '''Full text search. Return a list of queries to intersect.

    :param words: sequence of words to match.
    :param include: optional model types to restrict the search to.
    :param exclude: optional model types to leave out of the search.
    :param lookup: ``'contains'`` (default, match every word) or
        ``'in'`` (match at least one word).
    :raises ValueError: for an unknown lookup.
    '''
    lookup = lookup or 'contains'
    query = self.router.worditem.query()
    if include:
        query = query.filter(model_type__in=include)
    if exclude:
        # BUG FIX: previously excluded `include` instead of `exclude`.
        query = query.exclude(model_type__in=exclude)
    if not words:
        return [query]
    qs = []
    if lookup == 'in':
        # we are looking for items with at least one word in it
        qs.append(query.filter(word__in=words))
    elif lookup == 'contains':
        # we want to match every single word
        for word in words:
            qs.append(query.filter(word=word))
    else:
        raise ValueError('Unknown lookup "{0}"'.format(lookup))
    return qs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def redis_client(address=None, connection_pool=None, timeout=None,
                 parser=None, **kwargs):
    '''Get a new redis client.

    :param address: a ``host``, ``port`` tuple.
    :param connection_pool: optional connection pool; when given, all
        other connection parameters are ignored.
    :param timeout: socket timeout. A timeout of ``0`` selects the
        asynchronous client instead of a blocking connection.
    :param parser: optional parser (unused in this function body).
    '''
    # NOTE(review): `async` became a reserved keyword in Python 3.7, so
    # the module-level name used below only parses on older interpreters
    # — the import it refers to needs renaming; verify against the
    # module's import block.
    if not connection_pool:
        if timeout == 0:
            # Asynchronous client requested.
            if not async:
                raise ImportError('Asynchronous connection requires async '
                                  'bindings installed.')
            return async.pool.redis(address, **kwargs)
        else:
            kwargs['socket_timeout'] = timeout
            return Redis(address[0], address[1], **kwargs)
    else:
        return Redis(connection_pool=connection_pool)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dict_flat_generator(value, attname=None, splitter=JSPLITTER,
                        dumps=None, prefix=None, error=ValueError,
                        recursive=True):
    '''Convert a nested dictionary into a flat dictionary representation.

    Yields ``(key, value)`` pairs where nested keys are joined with
    `splitter` and prefixed with `attname` when given.

    :param value: the (possibly nested) value to flatten.
    :param attname: optional attribute name used as the outermost prefix.
    :param splitter: string inserted between nested key components.
    :param dumps: optional serializer applied to each leaf value.
    :param prefix: accumulated key path (used on recursive calls).
    :param error: exception class raised for a top-level non-dict value.
    :param recursive: when falsy, treat `value` as a leaf even if a dict.
    '''
    if not isinstance(value, dict) or not recursive:
        # Leaf: emit the accumulated key with the (optionally dumped) value.
        if not prefix:
            raise error('Cannot assign a non dictionary to a JSON field')
        else:
            name = '%s%s%s' % (attname, splitter,
                               prefix) if attname else prefix
            yield name, dumps(value) if dumps else value
    else:
        # loop over dictionary
        for field in value:
            val = value[field]
            key = prefix
            if field:
                key = '%s%s%s' % (prefix, splitter,
                                  field) if prefix else field
            # NOTE(review): `field` is passed as `recursive`, so an empty
            # field name makes the next level a leaf — presumably
            # intentional, but worth confirming.
            for k, v2 in dict_flat_generator(val, attname, splitter, dumps,
                                             key, error, field):
                yield k, v2
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addmul_number_dicts(series):
    '''Multiply dictionaries by numeric values and add them together.

    :parameter series: a tuple of two elements tuples. Each serie is of
        the form::

            (weight, dictionary)

        where ``weight`` is a number and ``dictionary`` is a dictionary
        with numeric values.

    Only common fields are aggregated. If a field has a non-numeric value
    it is not included either. Returns ``None`` for empty input or for
    value types other than numbers and dictionaries.'''
    if not series:
        return
    # Classify the values via the module's `value_type` helper
    # (1 -> plain numbers, 3 -> dictionaries).
    vtype = value_type((s[1] for s in series))
    if vtype == 1:
        # Numbers: plain weighted sum.
        return sum((weight*float(d) for weight, d in series))
    elif vtype == 3:
        # Dictionaries: keep only the keys present in every serie.
        keys = set(series[0][1])
        for serie in series[1:]:
            keys.intersection_update(serie[1])
        results = {}
        for key in keys:
            # Recurse per key so nested dictionaries aggregate too.
            key_series = tuple((weight, d[key]) for weight, d in series)
            result = addmul_number_dicts(key_series)
            if result is not None:
                results[key] = result
        return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Download(campaign=0, queue='build', email=None, walltime=8, **kwargs):
    '''
    Submits a cluster job to the build queue to download all TPFs for a given
    campaign.

    :param int campaign: The `K2` campaign to run
    :param str queue: The name of the queue to submit to. Default `build`
    :param str email: The email to send job status notifications to. \
           Default `None`
    :param int walltime: The number of hours to request. Default `8`
    '''
    # Figure out the subcampaign: a float X.Y encodes campaign X, decile Y.
    if type(campaign) is int:
        subcampaign = -1
    elif type(campaign) is float:
        x, y = divmod(campaign, 1)
        campaign = int(x)
        subcampaign = round(y * 10)
    # Submit the cluster job
    pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'download.pbs')
    str_w = 'walltime=%d:00:00' % walltime
    str_v = 'EVEREST_DAT=%s,CAMPAIGN=%d,SUBCAMPAIGN=%d' % (
        EVEREST_DAT, campaign, subcampaign)
    if subcampaign == -1:
        str_name = 'download_c%02d' % campaign
    else:
        str_name = 'download_c%02d.%d' % (campaign, subcampaign)
    str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log')
    qsub_args = ['qsub', pbsfile,
                 '-q', queue,
                 '-v', str_v,
                 '-o', str_out,
                 '-j', 'oe',
                 '-N', str_name,
                 '-l', str_w]
    if email is not None:
        # BUG FIX: was `append`, which nested a list inside qsub_args and
        # produced an invalid argument vector for subprocess.call.
        qsub_args += ['-M', email, '-m', 'ae']
    # Now we submit the job
    print("Submitting the job...")
    subprocess.call(qsub_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Run(campaign=0, EPIC=None, nodes=5, ppn=12, walltime=100,
        mpn=None, email=None, queue=None, **kwargs):
    '''
    Submits a cluster job to compute and plot data for all targets
    in a given campaign.

    :param campaign: The K2 campaign number. If this is an :py:class:`int`, \
           returns all targets in that campaign. If a :py:class:`float` \
           in the form `X.Y`, runs the `Y^th` decile of campaign `X`.
    :param str queue: The queue to submit to. Default `None` (default queue)
    :param str email: The email to send job status notifications to. \
           Default `None`
    :param int walltime: The number of hours to request. Default `100`
    :param int nodes: The number of nodes to request. Default `5`
    :param int ppn: The number of processors per node to request. Default `12`
    :param int mpn: Memory per node in gb to request. Default no setting.
    '''
    # Figure out the subcampaign: a float X.Y encodes campaign X, decile Y.
    if type(campaign) is int:
        subcampaign = -1
    elif type(campaign) is float:
        x, y = divmod(campaign, 1)
        campaign = int(x)
        subcampaign = round(y * 10)
    # DEV hack: limit backfill jobs to 10 hours
    if EVEREST_DEV and (queue == 'bf'):
        walltime = min(10, walltime)
    # Convert kwargs to string. This is really hacky. Pickle creates an array
    # of bytes, which we must convert into a regular string to pass to the pbs
    # script and then back into python. Decoding the bytes isn't enough, since
    # we have pesky escaped characters such as newlines that don't behave well
    # when passing this string around. My braindead hack is to replace newlines
    # with '%%%', then undo the replacement when reading the kwargs. This works
    # for most cases, but sometimes pickle creates a byte array that can't be
    # decoded into utf-8; this happens when trying to pass numpy arrays around,
    # for instance. This needs to be fixed in the future, but for now we'll
    # restrict the kwargs to be ints, floats, lists, and strings.
    try:
        strkwargs = pickle.dumps(kwargs, 0).decode(
            'utf-8').replace('\n', '%%%')
    except UnicodeDecodeError:
        raise ValueError('Unable to pickle `kwargs`. Currently the `kwargs` ' +
                         'values may only be `int`s, `float`s, `string`s, ' +
                         '`bool`s, or lists of these.')
    # Submit the cluster job
    pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'run.pbs')
    if mpn is not None:
        str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % (
            nodes, ppn, ppn, mpn * nodes)
    else:
        str_n = 'nodes=%d:ppn=%d,feature=%dcore' % (nodes, ppn, ppn)
    str_w = 'walltime=%d:00:00' % walltime
    str_v = "EVEREST_DAT=%s,NODES=%d," % (EVEREST_DAT, nodes) + \
            "EPIC=%d," % (0 if EPIC is None else EPIC) + \
            "CAMPAIGN=%d,SUBCAMPAIGN=%d,STRKWARGS='%s'" % \
            (campaign, subcampaign, strkwargs)
    if EPIC is None:
        if subcampaign == -1:
            str_name = 'c%02d' % campaign
        else:
            str_name = 'c%02d.%d' % (campaign, subcampaign)
    else:
        str_name = 'EPIC%d' % EPIC
    str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log')
    qsub_args = ['qsub', pbsfile,
                 '-v', str_v,
                 '-o', str_out,
                 '-j', 'oe',
                 '-N', str_name,
                 '-l', str_n,
                 '-l', str_w]
    if email is not None:
        # BUG FIX: was `append`, which nested a list inside qsub_args and
        # produced an invalid argument vector for subprocess.call.
        qsub_args += ['-M', email, '-m', 'ae']
    if queue is not None:
        qsub_args += ['-q', queue]
    # Now we submit the job
    print("Submitting the job...")
    subprocess.call(qsub_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PrimaryHDU(model):
    '''
    Construct the primary HDU file containing basic header info.
    '''
    # Mission-specific cards for the primary HDU.
    cards = model._mission.HDUCards(model.meta, hdu=0)
    existing = [card[0] for card in cards]
    if 'KEPMAG' not in existing:
        cards.append(('KEPMAG', model.mag, 'Kepler magnitude'))
    # Append the EVEREST provenance cards.
    cards.extend([
        ('COMMENT', '************************'),
        ('COMMENT', '* EVEREST INFO *'),
        ('COMMENT', '************************'),
        ('MISSION', model.mission, 'Mission name'),
        ('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'),
        ('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'),
        ('DATE', strftime('%Y-%m-%d'),
         'EVEREST file creation date (YYYY-MM-DD)'),
    ])
    # Build the HDU from the assembled header.
    header = pyfits.Header(cards=cards)
    return pyfits.PrimaryHDU(header=header)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PixelsHDU(model):
    '''
    Construct the HDU containing the pixel-level light curve.
    '''
    # Get mission cards. BUG FIX: these were previously discarded by an
    # immediate `cards = []`, unlike every other HDU constructor in this
    # module; keep them so the mission metadata survives.
    cards = model._mission.HDUCards(model.meta, hdu=2)
    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))
    # Create the HDU
    header = pyfits.Header(cards=cards)
    # The pixel timeseries
    arrays = [pyfits.Column(name='FPIX', format='%dD' %
                            model.fpix.shape[1], array=model.fpix)]
    # The first order PLD vectors for all the neighbors (npixels, ncadences)
    X1N = model.X1N
    if X1N is not None:
        arrays.append(pyfits.Column(name='X1N', format='%dD' %
                                    X1N.shape[1], array=X1N))
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='PIXELS')
    return hdu
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ApertureHDU(model):
    '''
    Construct the HDU containing the aperture used to de-trend.
    '''
    # Mission cards plus EVEREST provenance info.
    cards = model._mission.HDUCards(model.meta, hdu=3)
    cards.extend([
        ('COMMENT', '************************'),
        ('COMMENT', '* EVEREST INFO *'),
        ('COMMENT', '************************'),
        ('MISSION', model.mission, 'Mission name'),
        ('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'),
        ('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'),
        ('DATE', strftime('%Y-%m-%d'),
         'EVEREST file creation date (YYYY-MM-DD)'),
    ])
    # Build an image HDU holding the aperture mask.
    header = pyfits.Header(cards=cards)
    return pyfits.ImageHDU(data=model.aperture,
                           header=header, name='APERTURE MASK')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ImagesHDU(model):
    '''
    Construct the HDU containing sample postage stamp images of the target.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=4)
    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))
    # The images. Renamed from `format` to avoid shadowing the builtin.
    col_format = '%dD' % model.pixel_images[0].shape[1]
    arrays = [pyfits.Column(name='STAMP1', format=col_format,
                            array=model.pixel_images[0]),
              pyfits.Column(name='STAMP2', format=col_format,
                            array=model.pixel_images[1]),
              pyfits.Column(name='STAMP3', format=col_format,
                            array=model.pixel_images[2])]
    # Create the HDU
    header = pyfits.Header(cards=cards)
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(
        cols, header=header, name='POSTAGE STAMPS')
    return hdu
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def HiResHDU(model):
    '''
    Construct the HDU containing the hi res image of the target.
    '''
    # Mission cards plus EVEREST provenance info.
    cards = model._mission.HDUCards(model.meta, hdu=5)
    cards.extend([
        ('COMMENT', '************************'),
        ('COMMENT', '* EVEREST INFO *'),
        ('COMMENT', '************************'),
        ('MISSION', model.mission, 'Mission name'),
        ('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'),
        ('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'),
        ('DATE', strftime('%Y-%m-%d'),
         'EVEREST file creation date (YYYY-MM-DD)'),
    ])
    header = pyfits.Header(cards=cards)
    # Fall back to an empty image when no hi res data is available.
    if model.hires is not None:
        data = model.hires
    else:
        data = np.empty((0, 0), dtype=float)
    return pyfits.ImageHDU(data=data, header=header, name='HI RES IMAGE')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    '''
    Identical to `MaskSolve`, but computes the solution
    the brute-force way: for each window position `n`, rows/columns
    `n..n+w-1` are deleted from `A` (and entries from `b`) and the
    reduced system is solved via a Cholesky factorization.

    :param A: (N, N) coefficient matrix.
    :param b: length-N right-hand side vector.
    :param int w: width of the contiguous mask window. Default 5.
    :param bool progress: unused in this body; presumably kept for \
        signature parity with `MaskSolve` — confirm against that function.
    :param niter: number of window positions to evaluate. Default is \
        every position, ``N - w + 1``.
    '''
    # Number of data points
    N = b.shape[0]
    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1
    # Our result matrix: one solution row per mask position.
    X = np.empty((niter, N - w))
    # Iterate! The mask at step `n` goes from
    # data index `n` to data index `n+w-1` (inclusive).
    # NOTE(review): `prange`, `cholesky` and `cho_solve` come from this
    # module's imports (not visible in this chunk).
    for n in prange(niter):
        mask = np.arange(n, n + w)
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)
    return X
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unmasked(self, depth=0.01):
    """Return the unmasked overfitting metric for a given transit depth."""
    numerator = np.hstack(self._O2) + np.hstack(self._O3) / depth
    return 1 - numerator / np.hstack(self._O1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show(self):
    """Open the overfitting PDF summary in the platform's viewer."""
    try:
        system = platform.system().lower()
        if system.startswith('darwin'):
            # macOS
            subprocess.call(['open', self.pdf])
        elif os.name == 'nt':
            # Windows
            os.startfile(self.pdf)
        elif os.name == 'posix':
            # Linux / other POSIX
            subprocess.call(['xdg-open', self.pdf])
        else:
            raise IOError("")
    except IOError:
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(self.pdf)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def season(self):
    """
    Return the current observing season.

    For *K2*, this is the observing campaign, while for *Kepler*,
    it is the current quarter.
    """
    if not hasattr(self, '_season'):
        self._season = self._mission.Season(self.ID)
        # A sequence means the target appears in several seasons and
        # the caller must pick one explicitly.
        if hasattr(self._season, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s." %
                self._season)
    return self._season
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fcor(self):
    '''
    The CBV-corrected de-trended flux, or ``None`` when no CBV design
    matrix is available.
    '''
    if self.XCBV is None:
        return None
    return self.flux - self._mission.FitCBVs(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_info(self, dvs):
    '''
    Plots miscellaneous de-trending information on the data
    validation summary figure.

    :param dvs: A :py:class:`dvs.DVS` figure instance
    '''
    axl, axc, axr = dvs.title()
    # Center: mission ID string and target ID.
    axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=18)
    # Center, lower: raw -> de-trended CDPP.
    axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" %
                 (self.cdppr, self.cdpp),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8, color='k',
                 fontstyle='italic')
    # Left: mission, season and target name.
    axl.annotate("%s %s%02d: %s" %
                 (self.mission.upper(),
                  self._mission.SEASONCHAR, self.season, self.name),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12,
                 color='k')
    # Left, lower: aperture name, with neighbor count when present.
    axl.annotate(self.aperture_name if len(self.neighbors) == 0
                 else "%s, %d neighbors" %
                 (self.aperture_name, len(self.neighbors)),
                 xy=(0.5, 0.2), xycoords='axes fraction',
                 ha='center', va='center', fontsize=8, color='k',
                 fontstyle='italic')
    # Right: target magnitude.
    axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag),
                 xy=(0.5, 0.5), xycoords='axes fraction',
                 ha='center', va='center', fontsize=12,
                 color='k')
    # Right, lower: GP CDPP, only when it is a valid positive value.
    if not np.isnan(self.cdppg) and self.cdppg > 0:
        axr.annotate(r"GP %.3f ppm" % (self.cdppg),
                     xy=(0.5, 0.2), xycoords='axes fraction',
                     ha='center', va='center', fontsize=8, color='k',
                     fontstyle='italic')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute(self):
    '''
    Compute the model for the current value of lambda.

    Builds the PLD model chunk-by-chunk between breakpoints, then joins
    the chunks with per-chunk offsets and updates the CDPP statistics.
    '''
    # Is there a transit model? If so, delegate to the joint fit.
    if self.transit_model is not None:
        return self.compute_joint()
    log.info('Computing the model...')
    # Loop over all chunks
    model = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):
        # Masks for current chunk
        m = self.get_masked_chunk(b)
        c = self.get_chunk(b)
        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])
        # Get median
        med = np.nanmedian(self.fraw[m])
        # Normalize the flux
        f = self.fraw[m] - med
        # The X^2 matrices
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))
        # Loop over all orders
        for n in range(self.pld_order):
            # Only compute up to the current PLD order
            if (self.lam_idx >= n) and (self.lam[b][n] is not None):
                XM = self.X(n, m)
                XC = self.X(n, c)
                A += self.lam[b][n] * np.dot(XM, XM.T)
                B += self.lam[b][n] * np.dot(XC, XM.T)
                del XM, XC
        # Compute the model: solve on the masked cadences, predict on
        # the full chunk.
        W = np.linalg.solve(mK + A, f)
        model[b] = np.dot(B, W)
        # Free up some memory
        del A, B, W
    # Join the chunks after applying the correct offset
    if len(model) > 1:
        # First chunk
        self.model = model[0][:-self.bpad]
        # Center chunks
        for m in model[1:-1]:
            # Join the chunks at the first non-outlier cadence
            i = 1
            while len(self.model) - i in self.mask:
                i += 1
            offset = self.model[-i] - m[self.bpad - i]
            self.model = np.concatenate(
                [self.model, m[self.bpad:-self.bpad] + offset])
        # Last chunk
        i = 1
        while len(self.model) - i in self.mask:
            i += 1
        offset = self.model[-i] - model[-1][self.bpad - i]
        self.model = np.concatenate(
            [self.model, model[-1][self.bpad:] + offset])
    else:
        self.model = model[0]
    # Subtract the global median
    self.model -= np.nanmedian(self.model)
    # Get the CDPP and reset the weights
    self.cdpp_arr = self.get_cdpp_arr()
    self.cdpp = self.get_cdpp()
    self._weights = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_mask(self, x=None):
    '''
    Returns the outlier mask, an array of indices corresponding to the
    non-outliers.

    :param numpy.ndarray x: If specified, returns the masked version of \
           :py:obj:`x` instead. Default :py:obj:`None`
    '''
    if x is not None:
        return np.delete(x, self.mask, axis=0)
    return np.delete(np.arange(len(self.time)), self.mask)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cdpp(self, flux=None):
    '''
    Returns the scalar CDPP for the light curve
    (``self.flux`` when `flux` is not given).
    '''
    series = self.flux if flux is None else flux
    return self._mission.CDPP(self.apply_mask(series), cadence=self.cadence)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(ID, pipeline='everest2', campaign=None):
    '''
    Returns the `time` and `flux` for a given EPIC `ID` and
    a given `pipeline` name.

    :param ID: the EPIC target ID.
    :param str pipeline: one of ``'everest2'``, ``'everest1'``, \
        ``'k2sff'``, ``'k2sc'`` or ``'raw'``.
    :param campaign: optional K2 campaign number.

    :raises ValueError: for an unrecognized pipeline name.
    '''
    log.info('Downloading %s light curve for %d...' % (pipeline, ID))
    # Dev version hack: copy the locally-built FITS file into the kplr
    # cache directory so the client below can find it.
    if EVEREST_DEV:
        if pipeline.lower() == 'everest2' or pipeline.lower() == 'k2sff':
            from . import Season, TargetDirectory, FITSFile
            if campaign is None:
                campaign = Season(ID)
            fits = os.path.join(TargetDirectory(
                ID, campaign), FITSFile(ID, campaign))
            newdir = os.path.join(KPLR_ROOT, "data", "everest", str(ID))
            if not os.path.exists(newdir):
                os.makedirs(newdir)
            if os.path.exists(fits):
                shutil.copy(fits, newdir)
    # Dispatch on the pipeline name.
    if pipeline.lower() == 'everest2':
        s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign)
        time = s.time
        flux = s.flux
    elif pipeline.lower() == 'everest1':
        s = k2plr.EVEREST(ID, version=1, sci_campaign=campaign)
        time = s.time
        flux = s.flux
    elif pipeline.lower() == 'k2sff':
        s = k2plr.K2SFF(ID, sci_campaign=campaign)
        time = s.time
        flux = s.fcor
        # Normalize to the median flux of the EVEREST 2 light curve.
        s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign)
        flux *= np.nanmedian(s.flux)
    elif pipeline.lower() == 'k2sc':
        s = k2plr.K2SC(ID, sci_campaign=campaign)
        time = s.time
        flux = s.pdcflux
    elif pipeline.lower() == 'raw':
        s = k2plr.EVEREST(ID, version=2, raw=True, sci_campaign=campaign)
        time = s.time
        flux = s.flux
    else:
        raise ValueError('Invalid pipeline: `%s`.' % pipeline)
    return time, flux
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot(ID, pipeline='everest2', show=True, campaign=None):
    '''
    Plots the de-trended flux for the given EPIC `ID` and for
    the specified `pipeline`.

    :param int ID: The EPIC target ID
    :param str pipeline: The pipeline name, forwarded to :py:func:`get`
    :param bool show: If :py:obj:`True`, show and close the figure; \
    otherwise return (`fig`, `ax`)
    :param campaign: The K2 campaign; inferred if :py:obj:`None`
    '''
    # Get the data
    time, flux = get(ID, pipeline=pipeline, campaign=campaign)
    # Remove nans
    mask = np.where(np.isnan(flux))[0]
    time = np.delete(time, mask)
    flux = np.delete(flux, mask)
    # Plot it
    fig, ax = pl.subplots(1, figsize=(10, 4))
    fig.subplots_adjust(bottom=0.15)
    ax.plot(time, flux, "k.", markersize=3, alpha=0.5)
    # Axis limits: bound the central ~99% of the flux, with 10% padding
    N = int(0.995 * len(flux))
    hi, lo = flux[np.argsort(flux)][[N, -N]]
    pad = (hi - lo) * 0.1
    ylim = (lo - pad, hi + pad)
    ax.set_ylim(ylim)
    # Show the CDPP
    from .k2 import CDPP
    ax.annotate('%.2f ppm' % CDPP(flux),
                xy=(0.98, 0.975), xycoords='axes fraction',
                ha='right', va='top', fontsize=12, color='r', zorder=99)
    # Appearance
    ax.margins(0, None)
    ax.set_xlabel("Time (BJD - 2454833)", fontsize=16)
    ax.set_ylabel("%s Flux" % pipeline.upper(), fontsize=16)
    fig.canvas.set_window_title("%s: EPIC %d" % (pipeline.upper(), ID))
    if show:
        pl.show()
        pl.close()
    else:
        return fig, ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_outliers(self):
    '''
    Performs iterative sigma clipping to get outliers.

    Iterates until the outlier set converges, the iteration budget
    (:py:attr:`self.oiter`) is exhausted, or a previously seen outlier
    set recurs (a cycle). Updates :py:attr:`self.outmask` in place.
    '''
    log.info("Clipping outliers...")
    log.info('Iter %d/%d: %d outliers' %
             (0, self.oiter, len(self.outmask)))

    # M(x): remove NaN, bad, and in-transit cadences from `x`.
    def M(x): return np.delete(x, np.concatenate(
        [self.nanmask, self.badmask, self.transitmask]), axis=0)
    t = M(self.time)
    # Seed with a `[-1]` sentinel so the first comparison below fails
    # and the loop body runs at least once.
    outmask = [np.array([-1]), np.array(self.outmask)]
    # Loop as long as the last two outlier arrays aren't equal
    while not np.array_equal(outmask[-2], outmask[-1]):
        # Check if we've done this too many times
        if len(outmask) - 1 > self.oiter:
            log.error('Maximum number of iterations in ' +
                      '``get_outliers()`` exceeded. Skipping...')
            break
        # Check if we're going in circles
        if np.any([np.array_equal(outmask[-1], i) for i in outmask[:-1]]):
            log.error('Function ``get_outliers()`` ' +
                      'is going in circles. Skipping...')
            break
        # Compute the model to get the flux
        self.compute()
        # Get the outliers: MAD-based sigma clip on the
        # `SavGol`-filtered (smoothed) masked flux
        f = SavGol(M(self.flux))
        med = np.nanmedian(f)
        # 1.4826 * MAD is a robust estimate of the standard deviation
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        inds = np.where((f > med + self.osigma * MAD) |
                        (f < med - self.osigma * MAD))[0]
        # Project onto unmasked time array: argmax finds the first index
        # in the full time array whose value matches the masked cadence
        inds = np.array([np.argmax(self.time == t[i]) for i in inds])
        self.outmask = np.array(inds, dtype=int)
        # Add them to the running list
        outmask.append(np.array(inds))
        # Log
        log.info('Iter %d/%d: %d outliers' %
                 (len(outmask) - 2, self.oiter, len(self.outmask)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ylim(self):
    '''
    Computes the ideal y-axis limits for the light curve plot. Attempts to
    set the limits equal to those of the raw light curve, but if more than
    1% of the flux lies either above or below these limits, auto-expands
    to include those points. At the end, adds 5% padding to both the
    top and the bottom.
    '''
    # Deduplicated union of bad and NaN cadence indices.
    excluded = np.array(
        list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
    # Baseline limits: 4th-smallest / 4th-largest raw flux values.
    raw = np.delete(self.fraw, excluded)
    raw_sorted = raw[np.argsort(raw)]
    lo = raw_sorted[3]
    hi = raw_sorted[-3]
    # Expand if the central 98% of the de-trended flux spills over.
    det = np.delete(self.flux, excluded)
    det_sorted = det[np.argsort(det)]
    low_pct = det_sorted[int(0.01 * len(det_sorted))]
    high_pct = det_sorted[int(0.99 * len(det_sorted))]
    if low_pct < lo:
        lo = low_pct
    if high_pct > hi:
        hi = high_pct
    pad = (hi - lo) * 0.05
    return (lo - pad, hi + pad)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_cbv(self, ax, flux, info, show_cbv=False):
    '''
    Plots the final CBV-corrected light curve.

    :param ax: The matplotlib axis to draw into
    :param numpy.ndarray flux: The flux array to plot
    :param str info: Annotation text for the lower-right corner
    :param bool show_cbv: If :py:obj:`True`, overplot the CBV fit. \
    Default :py:obj:`False`
    '''
    # Plot the light curve
    bnmask = np.array(
        list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)

    def M(x):
        # Remove bad/NaN cadences
        return np.delete(x, bnmask)

    if self.cadence == 'lc':
        ax.plot(M(self.time), M(flux), ls='none', marker='.',
                color='k', markersize=2, alpha=0.45)
    else:
        # Short cadence: many more points, so fade and rasterize them
        ax.plot(M(self.time), M(flux), ls='none', marker='.',
                color='k', markersize=2, alpha=0.03, zorder=-1)
        ax.set_rasterization_zorder(0)
    # Hack: Plot invisible first and last points to ensure
    # the x axis limits are the
    # same in the other plots, where we also plot outliers!
    ax.plot(self.time[0], np.nanmedian(M(flux)), marker='.', alpha=0)
    ax.plot(self.time[-1], np.nanmedian(M(flux)), marker='.', alpha=0)
    # Show CBV fit?
    if show_cbv:
        ax.plot(self.time, self._mission.FitCBVs(
            self) + np.nanmedian(flux), 'r-', alpha=0.2)
    # Appearance
    ax.annotate(info, xy=(0.98, 0.025), xycoords='axes fraction',
                ha='right', va='bottom', fontsize=10, alpha=0.5,
                fontweight='bold')
    ax.margins(0.01, 0.1)
    # Get y lims that bound 99% of the flux
    flux = np.delete(flux, bnmask)
    N = int(0.995 * len(flux))
    hi, lo = flux[np.argsort(flux)][[N, -N]]
    # FIX: removed an unused `fsort` temporary (dead code) that
    # re-sorted the flux array here for no effect.
    pad = (hi - lo) * 0.2
    ylim = (lo - pad, hi + pad)
    ax.set_ylim(ylim)
    ax.get_yaxis().set_major_formatter(Formatter.Flux)
    ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=9)
    for tick in ax.get_xticklabels() + ax.get_yticklabels():
        tick.set_fontsize(7)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_tpf(self):
    '''
    Loads the target pixel file.

    Populates the photometry arrays, cadence masks, aperture info and
    ancillary metadata from either the injected `_data` object or the
    mission's `GetData` call. No-op if already loaded.
    '''
    if not self.loaded:
        if self._data is not None:
            # Data object was injected directly (e.g. injection runs)
            data = self._data
        else:
            data = self._mission.GetData(
                self.ID, season=self.season,
                cadence=self.cadence,
                clobber=self.clobber_tpf,
                aperture_name=self.aperture_name,
                saturated_aperture_name=self.saturated_aperture_name,
                max_pixels=self.max_pixels,
                saturation_tolerance=self.saturation_tolerance,
                get_hires=self.get_hires,
                get_nearby=self.get_nearby)
            if data is None:
                raise Exception("Unable to retrieve target data.")
        self.cadn = data.cadn
        self.time = data.time
        self.model = np.zeros_like(self.time)
        self.fpix = data.fpix
        # Raw SAP flux: sum of the pixel fluxes in the aperture
        self.fraw = np.sum(self.fpix, axis=1)
        self.fpix_err = data.fpix_err
        # Pixel errors added in quadrature
        self.fraw_err = np.sqrt(np.sum(self.fpix_err ** 2, axis=1))
        self.nanmask = data.nanmask
        self.badmask = data.badmask
        # Transit and outlier masks start empty
        self.transitmask = np.array([], dtype=int)
        self.outmask = np.array([], dtype=int)
        self.aperture = data.aperture
        self.aperture_name = data.aperture_name
        self.apertures = data.apertures
        self.quality = data.quality
        self.Xpos = data.Xpos
        self.Ypos = data.Ypos
        self.mag = data.mag
        self.pixel_images = data.pixel_images
        self.nearby = data.nearby
        self.hires = data.hires
        self.saturated = data.saturated
        self.meta = data.meta
        self.bkg = data.bkg
        # Update the last breakpoint to the correct value
        self.breakpoints[-1] = len(self.time) - 1
        # Get PLD normalization
        self.get_norm()
        self.loaded = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_model(self, name=None):
    '''
    Loads a saved version of the model.

    :param str name: The model name; defaults to :py:attr:`self.name`
    :returns: :py:obj:`True` if the model was successfully loaded, \
    :py:obj:`False` otherwise
    :raises Exception: If loading fails and this is a parent model
    '''
    if self.clobber:
        # User requested a fresh run: ignore any saved model
        return False
    if name is None:
        name = self.name
    file = os.path.join(self.dir, '%s.npz' % name)
    if os.path.exists(file):
        if not self.is_parent:
            log.info("Loading '%s.npz'..." % name)
        try:
            data = np.load(file)
            for key in data.keys():
                try:
                    setattr(self, key, data[key][()])
                except NotImplementedError:
                    pass
            # HACK: Backwards compatibility. Previous version stored
            # the CDPP in the `cdpp6`
            # and `cdpp6_arr` attributes. Let's move them over.
            if hasattr(self, 'cdpp6'):
                self.cdpp = self.cdpp6
                del self.cdpp6
            if hasattr(self, 'cdpp6_arr'):
                self.cdpp_arr = np.array(self.cdpp6_arr)
                del self.cdpp6_arr
            if hasattr(self, 'gppp'):
                self.cdppg = self.gppp
                del self.gppp
            # HACK: At one point we were saving the figure instances,
            # so loading the .npz
            # opened a plotting window. I don't think this is the case
            # any more, so this
            # next line should be removed in the future...
            pl.close()
            return True
        # FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and renamed the file as corrupt.
        except Exception:
            # `log.warn` is a deprecated alias for `log.warning`
            log.warning("Error loading '%s.npz'." % name)
            exctype, value, tb = sys.exc_info()
            for line in traceback.format_exception_only(exctype, value):
                ln = line.replace('\n', '')
                log.warning(ln)
            # Quarantine the unreadable file so we don't retry forever
            os.rename(file, file + '.bad')
            if self.is_parent:
                raise Exception(
                    'Unable to load `%s` model for target %d.'
                    % (self.name, self.ID))
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_model(self):
    '''
    Saves all of the de-trending information to disk in an `npz` file
    and saves the DVS as a `pdf`.
    '''
    # Save the data
    log.info("Saving data to '%s.npz'..." % self.name)
    d = dict(self.__dict__)
    # Strip derived, large, or unpicklable attributes before saving
    d.pop('_weights', None)
    d.pop('_A', None)
    d.pop('_B', None)
    d.pop('_f', None)
    d.pop('_mK', None)
    d.pop('K', None)
    d.pop('dvs', None)
    d.pop('clobber', None)
    d.pop('clobber_tpf', None)
    d.pop('_mission', None)
    d.pop('debug', None)
    d.pop('transit_model', None)
    d.pop('_transit_model', None)
    np.savez(os.path.join(self.dir, self.name + '.npz'), **d)
    # Save the DVS (data validation summary) figure with PDF metadata
    pdf = PdfPages(os.path.join(self.dir, self.name + '.pdf'))
    pdf.savefig(self.dvs.fig)
    pl.close(self.dvs.fig)
    d = pdf.infodict()
    d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
        self.name, self._mission.IDSTRING, self.ID)
    d['Author'] = 'Rodrigo Luger'
    pdf.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exception_handler(self, pdb):
    '''
    A custom exception handler.

    :param pdb: If :py:obj:`True`, enters PDB post-mortem \
    mode for debugging.
    '''
    # Grab the exception currently being handled
    exctype, value, tb = sys.exc_info()
    # Log the error and mirror every line into a .err file
    errfile = os.path.join(self.dir, self.name + '.err')
    all_lines = traceback.format_exception_only(exctype, value)
    all_lines += traceback.format_tb(tb)
    with open(errfile, 'w') as f:
        for line in all_lines:
            stripped = line.replace('\n', '')
            log.error(stripped)
            print(stripped, file=f)
    # Re-raise so the caller can drop into the debugger?
    if pdb:
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_kernel(self):
    '''
    Initializes the covariance matrix with a guess at
    the GP kernel parameters.

    Does nothing if :py:attr:`self.kernel_params` was already set.
    '''
    if self.kernel_params is None:
        # Regress the flux on the normalized pixel fluxes (a first-order
        # PLD fit) and work with the residuals `y`.
        X = self.apply_mask(self.fpix / self.flux.reshape(-1, 1))
        y = self.apply_mask(self.flux) - np.dot(X, np.linalg.solve(
            np.dot(X.T, X), np.dot(X.T, self.apply_mask(self.flux))))
        # White noise guess: median scatter over 13-cadence chunks
        white = np.nanmedian([np.nanstd(c) for c in Chunks(y, 13)])
        # Red noise amplitude guess, scaled by the user's gp_factor
        amp = self.gp_factor * np.nanstd(y)
        tau = 30.0  # red-noise timescale guess (units per `time`)
        if self.kernel == 'Basic':
            self.kernel_params = [white, amp, tau]
        elif self.kernel == 'QuasiPeriodic':
            self.kernel_params = [white, amp, 1., 20.]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    '''
    Runs the de-trending step.

    Loads the data, then iterates the PLD fit up to
    :py:attr:`self.pld_order`, sigma-clipping and cross-validating at
    each order, and finally saves the model and the DVS figure.
    '''
    try:
        # Load raw data
        log.info("Loading target data...")
        self.load_tpf()
        self.mask_planets()
        self.plot_aperture([self.dvs.top_right() for i in range(4)])
        self.init_kernel()
        M = self.apply_mask(np.arange(len(self.time)))
        # Seed all CDPP diagnostics with the raw-flux values
        self.cdppr_arr = self.get_cdpp_arr()
        self.cdpp_arr = np.array(self.cdppr_arr)
        self.cdppv_arr = np.array(self.cdppr_arr)
        self.cdppr = self.get_cdpp()
        self.cdpp = self.cdppr
        self.cdppv = self.cdppr
        log.info("%s (Raw): CDPP = %s" % (self.name, self.cdpps))
        self.plot_lc(self.dvs.left(), info_right='Raw', color='k')
        # Loop over the PLD orders
        for n in range(self.pld_order):
            self.lam_idx += 1
            self.get_outliers()
            if n > 0 and self.optimize_gp:
                self.update_gp()
            self.cross_validate(self.dvs.right(), info='CV%d' % n)
            self.cdpp_arr = self.get_cdpp_arr()
            self.cdppv_arr *= self.cdpp_arr
            self.cdpp = self.get_cdpp()
            self.cdppv = np.nanmean(self.cdppv_arr)
            log.info("%s (%d/%d): CDPP = %s" %
                     (self.name, n + 1, self.pld_order, self.cdpps))
            self.plot_lc(self.dvs.left(), info_right='LC%d' % (
                n + 1), info_left='%d outliers' % len(self.outmask))
        # Save
        self.finalize()
        self.plot_final(self.dvs.top_left())
        self.plot_info(self.dvs)
        self.save_model()
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt; the
        # handler re-raises when `self.debug` is set.
        self.exception_handler(self.debug)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def publish(self, **kwargs):
    '''
    Correct the light curve with the CBVs, generate a
    cover page for the DVS figure,
    and produce a FITS file for publication.

    Any exception is routed to :py:meth:`exception_handler`.
    '''
    try:
        # HACK: Force these params for publication
        self.cbv_win = 999
        self.cbv_order = 3
        self.cbv_num = 1
        # Get the CBVs
        self._mission.GetTargetCBVs(self)
        # Plot the final corrected light curve (three panels:
        # corrected, de-trended with the CBV fit overlaid, and raw)
        cbv = CBV()
        self.plot_info(cbv)
        self.plot_cbv(cbv.body(), self.fcor, 'Corrected')
        self.plot_cbv(cbv.body(), self.flux, 'De-trended', show_cbv=True)
        self.plot_cbv(cbv.body(), self.fraw, 'Raw')
        # Save the CBV pdf
        pdf = PdfPages(os.path.join(self.dir, 'cbv.pdf'))
        pdf.savefig(cbv.fig)
        pl.close(cbv.fig)
        d = pdf.infodict()
        d['Title'] = 'EVEREST: %s de-trending of %s %d' % (
            self.name, self._mission.IDSTRING, self.ID)
        d['Author'] = 'Rodrigo Luger'
        pdf.close()
        # Now merge the two PDFs
        assert os.path.exists(os.path.join(
            self.dir, self.name + '.pdf')), \
            "Unable to locate %s.pdf." % self.name
        output = PdfFileWriter()
        pdfOne = PdfFileReader(os.path.join(self.dir, 'cbv.pdf'))
        pdfTwo = PdfFileReader(os.path.join(self.dir, self.name + '.pdf'))
        # Add the CBV page
        output.addPage(pdfOne.getPage(0))
        # Add the original DVS page
        output.addPage(pdfTwo.getPage(pdfTwo.numPages - 1))
        # Write the final PDF
        outputStream = open(os.path.join(self.dir, self._mission.DVSFile(
            self.ID, self.season, self.cadence)), "wb")
        output.write(outputStream)
        outputStream.close()
        # The temporary cover page is no longer needed
        os.remove(os.path.join(self.dir, 'cbv.pdf'))
        # Make the FITS file
        MakeFITS(self)
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt; the
        # handler re-raises when `self.debug` is set.
        self.exception_handler(self.debug)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    '''
    Runs the de-trending.

    Plots the initial solution, cross-validates, recomputes the model
    and CDPP, plots the optimized solution, and saves everything.
    '''
    try:
        # Plot original
        self.plot_aperture([self.dvs.top_right() for i in range(4)])
        self.plot_lc(self.dvs.left(), info_right='nPLD', color='k')
        # Cross-validate
        self.cross_validate(self.dvs.right())
        self.compute()
        self.cdpp_arr = self.get_cdpp_arr()
        self.cdpp = self.get_cdpp()
        # Plot new
        self.plot_lc(self.dvs.left(), info_right='Powell', color='k')
        # Save
        self.plot_final(self.dvs.top_left())
        self.plot_info(self.dvs)
        self.save_model()
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt; the
        # handler re-raises when `self.debug` is set.
        self.exception_handler(self.debug)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux,
                       time, med):
    '''
    Computes the scatter in the validation set.

    :param float log_lam: log10 of the regularization for chunk `b`
    :param int b: The light curve chunk index
    :param masks: The validation-set index masks
    :param pre_v: Pre-computed :py:meth:`cv_compute` arguments, \
    one per mask
    :param gp: The GP used to predict the masked cadences
    :param flux, time, med: The flux array, time array, and median flux
    :returns: The worst (maximum) scatter over the validation masks; \
    ``1e30`` if the model produced NaNs
    '''
    # Update the lambda matrix
    self.lam[b] = 10 ** log_lam
    # Validation set scatter
    scatter = [None for i in range(len(masks))]
    for i in range(len(masks)):
        model = self.cv_compute(b, *pre_v[i])
        try:
            # Predict the GP component at the held-out cadences
            gpm, _ = gp.predict(flux - model - med, time[masks[i]])
        except ValueError:
            # Sometimes the model can have NaNs if
            # `lambda` is a crazy value
            return 1.e30
        fdet = (flux - model)[masks[i]] - gpm
        # Robust (MAD-based) scatter of the normalized de-trended flux,
        # scaled to ppm and by the validation-set size
        scatter[i] = 1.e6 * (1.4826 * np.nanmedian(np.abs(fdet / med -
                             np.nanmedian(fdet / med))) /
                             np.sqrt(len(masks[i])))
    return np.max(scatter)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iterdirty(self):
    '''Ordered iterator over dirty elements: new first, then modified.'''
    new_elements = itervalues(self._new)
    modified_elements = itervalues(self._modified)
    return iter(chain(new_elements, modified_elements))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit(self, callback=None):
    '''Close the transaction and commit session to the backend.'''
    if self.executed:
        raise InvalidTransaction('Invalid operation. '
                                 'Transaction already executed.')
    # Detach the session first so this transaction cannot be reused.
    session, self.session = self.session, None
    self.on_result = self._commit(session, callback)
    return self.on_result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_related(self, meta, fname, data, fields, encoding):
    '''Parse data for related objects.

    :param meta: The model metadata owning the related field `fname`
    :param fname: Name of the related field
    :param data: The raw backend payload for the related objects
    :param fields: Field names to load for related model instances
    :param encoding: Encoding used to decode backend byte strings
    '''
    field = meta.dfields[fname]
    if field in meta.multifields:
        fmeta = field.structure_class()._meta
        if fmeta.name in ('hashtable', 'zset'):
            # Pair-based structures: decode the id and turn the flat
            # field/value list into a dictionary.
            return ((native_str(id, encoding),
                     pairs_to_dict(fdata, encoding)) for
                    id, fdata in data)
        else:
            # Other structures: pass the raw payload through.
            return ((native_str(id, encoding), fdata) for
                    id, fdata in data)
    else:
        # this is data for stdmodel instances
        return self.build(data, meta, fields, fields, encoding)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _execute_query(self):
    '''Execute the query without fetching data. Returns the number of
    elements in the query.

    Generator-style coroutine: selects sorted-set (zset) or plain-set
    commands depending on the model ordering, queues a cardinality call
    on the pipeline, and yields the cardinality of the query key.
    '''
    pipe = self.pipe
    if not self.card:
        if self.meta.ordering:
            # Ordered models live in redis sorted sets
            self.ismember = getattr(self.backend.client, 'zrank')
            self.card = getattr(pipe, 'zcard')
            self._check_member = self.zism
        else:
            # Unordered models live in plain redis sets
            self.ismember = getattr(self.backend.client, 'sismember')
            self.card = getattr(pipe, 'scard')
            self._check_member = self.sism
    else:
        self.ismember = None
    self.card(self.query_key)
    result = yield pipe.execute()
    # The cardinality is the result of the last queued command
    yield result[-1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def order(self, last):
    '''Perform ordering with respect model fields.

    Walks the nested-field chain of `last` to assemble the arguments
    required by the backend sort command.
    '''
    desc = last.desc
    field = last.name
    nested = last.nested
    nested_args = []
    while nested:
        meta = nested.model._meta
        nested_args.extend((self.backend.basekey(meta), nested.name))
        last = nested
        nested = nested.nested
    # Text fields must be sorted lexicographically (redis ALPHA sort)
    method = 'ALPHA' if last.field.internal_type == 'text' else ''
    # Sorting by the primary key: an empty field sorts by the member
    if field == last.model._meta.pkname():
        field = ''
    return {'field': field,
            'method': method,
            'desc': desc,
            'nested': nested_args}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def related_lua_args(self):
    '''Generator of load_related arguments.

    Yields ``(field_name, data)`` pairs, one per related model to
    load, in the form expected by the backend lua script.
    '''
    related = self.queryelem.select_related
    if related:
        meta = self.meta
        for rel in related:
            field = meta.dfields[rel]
            relmodel = field.relmodel
            # Base key of the related model, if there is one
            bk = self.backend.basekey(relmodel._meta) if relmodel else ''
            fields = list(related[rel])
            # The primary key is never requested explicitly
            if meta.pkname() in fields:
                fields.remove(meta.pkname())
            if not fields:
                # The lua side needs a non-empty field list
                fields.append('')
            ftype = field.type if field in meta.multifields else ''
            data = {'field': field.attname, 'type': ftype,
                    'bk': bk, 'fields': fields}
            yield field.name, data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pop_range(self, start, stop=None, withscores=True, **options):
    '''Remove and return a range from the ordered set by score.

    :param start: Minimum score of the range
    :param stop: Maximum score. Default :py:obj:`None`
    :param bool withscores: If :py:obj:`True`, include scores in the \
    result
    '''
    # Delegate to the redis client; `_range` post-processes the raw
    # backend reply (with or without scores).
    return self.backend.execute(
        self.client.zpopbyscore(self.id, start, stop,
                                withscores=withscores, **options),
        partial(self._range, withscores))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute_session(self, session_data):
    '''Execute a session in redis.

    :param session_data: Iterable of per-model session objects with \
    `meta`, `structures`, `deletes` and `dirty` attributes
    :returns: The result of executing the assembled redis pipeline
    :raises FieldValueError: If a dirty instance fails validation
    '''
    pipe = self.client.pipeline()
    for sm in session_data:  # loop through model sessions
        meta = sm.meta
        if sm.structures:
            self.flush_structure(sm, pipe)
        delquery = None
        if sm.deletes is not None:
            delquery = sm.deletes.backend_query(pipe=pipe)
            self.accumulate_delete(pipe, delquery)
        if sm.dirty:
            meta_info = json.dumps(self.meta(meta))
            # lua payload: [instance count, then one record per instance]
            lua_data = [len(sm.dirty)]
            processed = []
            for instance in sm.dirty:
                state = instance.get_state()
                if not meta.is_valid(instance):
                    raise FieldValueError(
                        json.dumps(instance._dbdata['errors']))
                # Sorted-set score when the model is ordered
                score = MIN_FLOAT
                if meta.ordering:
                    if meta.ordering.auto:
                        score = meta.ordering.name.incrby
                    else:
                        v = getattr(instance, meta.ordering.name, None)
                        if v is not None:
                            score = meta.ordering.field.scorefun(v)
                data = instance._dbdata['cleaned_data']
                action = state.action
                prev_id = state.iid if state.persistent else ''
                id = instance.pkvalue() or ''
                data = flat_mapping(data)
                # Record layout: action, previous id, new id, score,
                # item count, then the flattened field/value data
                lua_data.extend((action, prev_id, id, score, len(data)))
                lua_data.extend(data)
                processed.append(state.iid)
            self.odmrun(pipe, 'commit', meta, (), meta_info,
                        *lua_data, iids=processed)
    return pipe.execute()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flush(self, meta=None):
    '''Flush all model keys from the database'''
    # Restrict the wipe to a single model's keys when `meta` is given;
    # otherwise delete everything under this backend's namespace.
    if meta:
        pattern = self.basekey(meta)
    else:
        pattern = self.namespace
    return self.client.delpattern('%s*' % pattern)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetCovariance(kernel, kernel_params, time, errors):
    '''
    Returns the covariance matrix for a given light curve
    segment.

    :param array_like kernel_params: A list of kernel parameters \
    (white noise amplitude, red noise amplitude, and red noise timescale)
    :param array_like time: The time array (*N*)
    :param array_like errors: The data error array (*N*)

    :returns: The covariance matrix :py:obj:`K` (*N*,*N*)
    '''
    # NOTE: We purposefully compute the covariance matrix
    # *without* the GP white noise term; the data errors provide
    # the diagonal instead.
    red_noise = GP(kernel, kernel_params, white=False).get_matrix(time)
    return np.diag(errors ** 2) + red_noise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def NegLnLike(x, time, flux, errors, kernel):
    '''
    Returns the negative log-likelihood function and its gradient.

    :param array_like x: The kernel hyperparameter vector
    :param array_like time: The time array
    :param array_like flux: The flux array
    :param array_like errors: The flux error array
    :param kernel: The kernel name, forwarded to :py:func:`GP`
    :returns: A (`nll`, `ngr`) tuple — the negative log-likelihood \
    and its gradient with respect to `x`
    '''
    gp = GP(kernel, x, white=True)
    gp.compute(time, errors)
    if OLDGEORGE:
        # Older `george` API (lnlikelihood / grad_lnlikelihood)
        nll = -gp.lnlikelihood(flux)
        # NOTE: There was a bug on this next line! Used to be
        #
        # ngr = -gp.grad_lnlikelihood(flux) / gp.kernel.pars
        #
        # But I think we want
        #
        # dlogL/dx = dlogL/dlogx^2 * dlogx^2/dx^2 * dx^2/dx
        # = gp.grad_lnlikelihood() * 1/x^2 * 2x
        # = 2 * gp.grad_lnlikelihood() / x
        # = 2 * gp.grad_lnlikelihood() / np.sqrt(x^2)
        # = 2 * gp.grad_lnlikelihood() / np.sqrt(gp.kernel.pars)
        #
        # (with a negative sign out front for the negative gradient).
        # So we probably weren't optimizing the GP correctly! This affects
        # all campaigns through C13. It's not a *huge* deal, since the sign
        # of the gradient was correct and the model isn't that sensitive to
        # the value of the hyperparameters, but it may have contributed to
        # the poor performance on super variable stars. In most cases it means
        # the solver takes longer to converge and isn't as good at finding
        # the minimum.
        ngr = -2 * gp.grad_lnlikelihood(flux) / np.sqrt(gp.kernel.pars)
    else:
        # Newer `george` API; parameters are stored as logs, hence the
        # exp() before the chain-rule conversion above.
        nll = -gp.log_likelihood(flux)
        ngr = -2 * gp.grad_log_likelihood(flux) / \
            np.sqrt(np.exp(gp.get_parameter_vector()))
    return nll, ngr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def missing_intervals(startdate, enddate, start, end,
                      dateconverter=None,
                      parseinterval=None,
                      intervals=None):
    '''Given a ``startdate`` and an ``enddate`` date, evaluate the
    date intervals for which data is not available. It returns a list of
    two-dimensional tuples containing start and end date for the interval.
    The list could contain 0, 1 or 2 tuples.

    :param startdate, enddate: The requested date range
    :param start, end: The range already available (``start`` falsy \
    means no history at all)
    :param dateconverter: Optional date coercion; default ``todate``
    :param parseinterval: Optional boundary adjuster taking \
    ``(date, offset)``; default ``default_parse_interval``
    :param intervals: Previously known intervals to merge in
    '''
    parseinterval = parseinterval or default_parse_interval
    dateconverter = dateconverter or todate
    startdate = dateconverter(parseinterval(startdate, 0))
    # Guard against an end date that precedes the start date
    enddate = max(startdate, dateconverter(parseinterval(enddate, 0)))
    if intervals is not None and not isinstance(intervals, Intervals):
        intervals = Intervals(intervals)
    calc_intervals = Intervals()
    # we have some history already
    if start:
        # the startdate not available: fetch from startdate up to the
        # day before the available history begins
        if startdate < start:
            calc_start = startdate
            calc_end = parseinterval(start, -1)
            if calc_end >= calc_start:
                calc_intervals.append(Interval(calc_start, calc_end))
        # the tail after the available history is also missing
        if enddate > end:
            calc_start = parseinterval(end, 1)
            calc_end = enddate
            if calc_end >= calc_start:
                calc_intervals.append(Interval(calc_start, calc_end))
    else:
        # no history at all: the whole requested range is missing
        start = startdate
        end = enddate
        calc_intervals.append(Interval(startdate, enddate))
    if calc_intervals:
        if intervals:
            calc_intervals.extend(intervals)
    elif intervals:
        calc_intervals = intervals
    return calc_intervals
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def InitLog(file_name=None, log_level=logging.DEBUG,
            screen_level=logging.CRITICAL, pdb=False):
    '''
    A little routine to initialize the logging functionality.

    :param str file_name: The name of the file to log to. \
    Default :py:obj:`None` (set internally by :py:mod:`everest`)
    :param int log_level: The file logging level (0-50). Default 10 (debug)
    :param int screen_level: The screen logging level (0-50). \
    Default 50 (critical)
    :param bool pdb: If :py:obj:`True`, force debug-level screen \
    logging and enter PDB post-mortem on uncaught exceptions
    '''
    # Initialize the logging: reset the root logger
    root = logging.getLogger()
    root.handlers = []
    root.setLevel(logging.DEBUG)
    # File handler
    if file_name is not None:
        # FIX: `os.path.dirname` returns '' for a bare filename, and
        # `os.makedirs('')` raises; only create the directory when there
        # is one, and tolerate it already existing (race-safe).
        dirname = os.path.dirname(file_name)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        fh = logging.FileHandler(file_name)
        fh.setLevel(log_level)
        fh_formatter = logging.Formatter(
            "%(asctime)s %(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s",
            datefmt="%m/%d/%y %H:%M:%S")
        fh.setFormatter(fh_formatter)
        # Suppress PIL's chatty records in the file log
        fh.addFilter(NoPILFilter())
        root.addHandler(fh)
    # Screen handler
    sh = logging.StreamHandler(sys.stdout)
    if pdb:
        sh.setLevel(logging.DEBUG)
    else:
        sh.setLevel(screen_level)
    sh_formatter = logging.Formatter(
        "%(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s")
    sh.setFormatter(sh_formatter)
    sh.addFilter(NoPILFilter())
    root.addHandler(sh)
    # Set exception hook
    if pdb:
        sys.excepthook = ExceptionHookPDB
    else:
        sys.excepthook = ExceptionHook
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ExceptionHook(exctype, value, tb):
    '''
    A custom exception handler that logs errors to file.
    '''
    # Log the exception summary first, then the traceback lines,
    # stripping newlines so each record is a single log line.
    all_lines = (traceback.format_exception_only(exctype, value) +
                 traceback.format_tb(tb))
    for line in all_lines:
        log.error(line.replace('\n', ''))
    # Defer to the default hook for the usual stderr output
    sys.__excepthook__(exctype, value, tb)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prange(*x):
    '''
    Progress bar range with `tqdm`

    Falls back to a plain :py:func:`range` when `tqdm` is unavailable
    or when a non-critical stream handler is not present.
    '''
    try:
        handlers = logging.getLogger().handlers
        if not handlers:
            # No logging configured at all: show the progress bar
            from tqdm import tqdm
            return tqdm(range(*x))
        for handler in handlers:
            is_plain_stream = type(handler) is logging.StreamHandler
            if is_plain_stream and handler.level != logging.CRITICAL:
                # Screen output enabled: show the progress bar
                from tqdm import tqdm
                return tqdm(range(*x))
        return range(*x)
    except ImportError:
        # tqdm not installed: silently degrade to a bare range
        return range(*x)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def back(self, *fields):
    '''Return the back pair of the structure'''
    # Fetch only the last element (index -1 to -1), restricted to
    # the requested fields.
    ts = self.irange(-1, -1, fields=fields)
    if ts:
        # NOTE(review): presumably `ts.end()` is the key/time of the
        # last entry and `ts[0]` its value — confirm against the
        # `irange` result type. Implicitly returns None when empty.
        return ts.end(), ts[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Search(ID, mission='k2'):
    """
    Why is my target not in the EVEREST database?

    Runs a sequence of diagnostic checks for the target and prints the
    most likely reason it is missing from the catalog. Each check may
    hit the network (MAST / K2SFF downloads), so this can be slow.

    :param int ID: The target ID (EPIC number for K2)
    :param str mission: The mission name. Only `k2` is supported.
    """
    # Only K2 supported for now
    assert mission == 'k2', "Only the K2 mission is supported for now."
    print("Searching for target %d..." % ID)
    # First check if it is in the database
    season = missions.k2.Season(ID)
    if season in [91, 92, [91, 92]]:
        print("Campaign 9 is currently not part of the EVEREST catalog.")
        return
    elif season == 101:
        print("The first half of campaign 10 is not currently part of " +
              "the EVEREST catalog.")
        return
    elif season is not None:
        print("Target is in campaign %d of the EVEREST catalog." % season)
        return
    # Get the kplr object
    star = k2plr_client.k2_star(ID)
    # First check if this is a star
    if star.objtype.lower() != "star":
        print("Target is of type %s, not STAR, " % star.objtype +
              "and is therefore not included in the EVEREST catalog.")
        return
    # Let's try to download the pixel data and see what happens
    try:
        tpf = star.get_target_pixel_files()
    except:
        print("Unable to download the raw pixel files for this target.")
        return
    if len(tpf) == 0:
        print("Raw pixel files are not available for this target. Looks like " +
              "data may not have been collected for it.")
        return
    # Perhaps it's in a campaign we haven't gotten to yet
    if tpf[0].sci_campaign not in missions.k2.SEASONS:
        print("Targets for campaign %d are not yet available."
              % tpf[0].sci_campaign)
        return
    # Let's try to download the K2SFF data
    try:
        k2sff = k2plr.K2SFF(ID)
    except:
        print("Error downloading the K2SFF light curve for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return
    # Let's try to get the aperture
    try:
        assert np.count_nonzero(k2sff.apertures[15]), "Invalid aperture."
    except:
        print("Unable to retrieve the K2SFF aperture for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return
    # Perhaps the star is *super* saturated and we didn't bother
    # de-trending it?
    if star.kp < 8:
        # BUGFIX: the original never applied the `%` operator, so the
        # literal "%.1f" was printed instead of the magnitude
        print(("Target has Kp = %.1f and is too saturated " +
               "for proper de-trending with EVEREST.") % star.kp)
        return
    # I'm out of ideas
    print("I'm not sure why this target isn't in the EVEREST catalog." +
          "You can try de-trending it yourself:")
    print("http://faculty.washington.edu/rodluger/everest/pipeline.html")
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_norm(self):
    '''
    Computes the PLD flux normalization array.

    Builds a PLD model of the flux in each light-curve chunk from the
    recovery-masked cadences, stitches the chunks together with constant
    offsets at the breakpoints, and stores the median-subtracted model
    residual in ``self._norm``.

    ..note :: `iPLD` model **only**.
    '''
    log.info('Computing the PLD normalization...')
    # Loop over all chunks
    mod = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):
        # Unmasked chunk
        c = self.get_chunk(b)
        # Masked chunk (original mask plus user transit mask)
        inds = np.array(
            list(set(np.concatenate([self.transitmask,
                                     self.recmask]))), dtype=int)
        M = np.delete(np.arange(len(self.time)), inds, axis=0)
        # Restrict to this chunk, padded by `bpad` cadences on each side
        # so adjacent chunks overlap and can be offset-matched below
        if b > 0:
            m = M[(M > self.breakpoints[b - 1] - self.bpad)
                  & (M <= self.breakpoints[b] + self.bpad)]
        else:
            m = M[M <= self.breakpoints[b] + self.bpad]
        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])
        # Get median
        med = np.nanmedian(self.fraw[m])
        # Normalize the flux
        f = self.fraw[m] - med
        # The X^2 matrices
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))
        # Loop over all orders, accumulating each order's regressor
        # outer products weighted by its regularization `reclam`
        for n in range(self.pld_order):
            XM = self.X(n, m)
            XC = self.X(n, c)
            A += self.reclam[b][n] * np.dot(XM, XM.T)
            B += self.reclam[b][n] * np.dot(XC, XM.T)
            del XM, XC
        # GLS solve on the masked cadences, then predict the full chunk
        W = np.linalg.solve(mK + A, f)
        mod[b] = np.dot(B, W)
        del A, B, W
    # Join the chunks after applying the correct offset
    if len(mod) > 1:
        # First chunk
        model = mod[0][:-self.bpad]
        # Center chunks: match each chunk's start to the running model's
        # last value within the overlap region
        for m in mod[1:-1]:
            offset = model[-1] - m[self.bpad - 1]
            model = np.concatenate(
                [model, m[self.bpad:-self.bpad] + offset])
        # Last chunk
        offset = model[-1] - mod[-1][self.bpad - 1]
        model = np.concatenate([model, mod[-1][self.bpad:] + offset])
    else:
        model = mod[0]
    # Subtract the global median
    model -= np.nanmedian(model)
    # Save the norm
    self._norm = self.fraw - model
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_pipeline(self, pipeline, *args, **kwargs):
    '''
    Plots the light curve for the target de-trended with a given pipeline.

    :param str pipeline: The name of the pipeline (lowercase). Options \
           are 'everest2', 'everest1', and other mission-specific \
           pipelines. For `K2`, the available pipelines are 'k2sff' \
           and 'k2sc'.

    Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly to
    the :py:func:`pipelines.plot` function of the mission.
    '''
    if pipeline != 'everest2':
        # Delegate to the mission-specific plotting routine
        return getattr(missions, self.mission).pipelines.plot(self.ID,
                                                              pipeline,
                                                              *args,
                                                              **kwargs)
    else:
        # We're going to plot the everest 2 light curve like we plot
        # the other pipelines for easy comparison
        plot_raw = kwargs.get('plot_raw', False)
        plot_cbv = kwargs.get('plot_cbv', True)
        show = kwargs.get('show', True)
        if plot_raw:
            y = self.fraw
            ylabel = 'Raw Flux'
        elif plot_cbv:
            # CBV-corrected flux; NOTE the label is the same as the
            # plain de-trended branch below (intentional, presumably)
            y = self.fcor
            ylabel = "EVEREST2 Flux"
        else:
            y = self.flux
            ylabel = "EVEREST2 Flux"
        # Remove nans
        bnmask = np.concatenate([self.nanmask, self.badmask])
        time = np.delete(self.time, bnmask)
        flux = np.delete(y, bnmask)
        # Plot it
        fig, ax = pl.subplots(1, figsize=(10, 4))
        fig.subplots_adjust(bottom=0.15)
        ax.plot(time, flux, "k.", markersize=3, alpha=0.5)
        # Axis limits: clip to the central 99.5% of the flux values
        N = int(0.995 * len(flux))
        hi, lo = flux[np.argsort(flux)][[N, -N]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        # Plot bad data points
        ax.plot(self.time[self.badmask], y[self.badmask],
                "r.", markersize=3, alpha=0.2)
        # Show the CDPP
        ax.annotate('%.2f ppm' % self._mission.CDPP(flux),
                    xy=(0.98, 0.975), xycoords='axes fraction',
                    ha='right', va='top', fontsize=12, color='r',
                    zorder=99)
        # Appearance
        ax.margins(0, None)
        ax.set_xlabel("Time (%s)" % self._mission.TIMEUNITS, fontsize=16)
        ax.set_ylabel(ylabel, fontsize=16)
        # NOTE(review): `canvas.set_window_title` was removed in recent
        # matplotlib (use `canvas.manager.set_window_title`) — confirm
        # the pinned matplotlib version before upgrading
        fig.canvas.set_window_title("EVEREST2: EPIC %d" % (self.ID))
        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_pipeline(self, *args, **kwargs):
    '''
    Returns the `time` and `flux` arrays for the target obtained by a given
    pipeline.

    Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
    the :py:func:`pipelines.get` function of the mission.
    '''
    # Resolve the mission module, then delegate to its pipelines API
    mission_module = getattr(missions, self.mission)
    return mission_module.pipelines.get(self.ID, *args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _save_npz(self):
'''
Saves all of the de-trending information to disk in an `npz` file
'''
# Save the data
d = dict(self.__dict__)
d.pop('_weights', None)
d.pop('_A', None)
d.pop('_B', None)
d.pop('_f', None)
d.pop('_mK', None)
d.pop('K', None)
d.pop('dvs', None)
d.pop('clobber', None)
d.pop('clobber_tpf', None)
d.pop('_mission', None)
d.pop('debug', None)
np.savez(os.path.join(self.dir, self.name + '.npz'), **d) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Interpolate(time, mask, y):
    '''
    Masks certain elements in the array `y` and linearly
    interpolates over them, returning an array `y'` of the
    same length.

    :param array_like time: The time array
    :param array_like mask: The indices to be interpolated over
    :param array_like y: The dependent array
    '''
    # Work on a copy so the caller's array is never modified in place
    result = np.array(y)
    # The "good" samples: everything outside the mask
    t_good = np.delete(time, mask)
    y_good = np.delete(y, mask, axis=0)
    ndim = len(result.shape)
    if ndim == 1:
        result[mask] = np.interp(time[mask], t_good, y_good)
    elif ndim == 2:
        # Interpolate each column independently
        for col in range(result.shape[1]):
            result[mask, col] = np.interp(time[mask], t_good,
                                          y_good[:, col])
    else:
        raise Exception("Array ``y`` must be either 1- or 2-d.")
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Smooth(x, window_len=100, window='hanning'):
    '''
    Smooth data by convolving on a given timescale.

    :param ndarray x: The data array
    :param int window_len: The size of the smoothing window. Default `100`
    :param str window: The window type: `flat`, `hanning`, `hamming`, \
        `bartlett`, or `blackman`. Default `hanning`
    '''
    if window_len == 0:
        return np.zeros_like(x)
    # Reflect the signal about both endpoints to suppress edge effects
    s = np.r_[2 * x[0] - x[window_len - 1::-1],
              x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy namespace directly
        # instead of eval()-ing an arbitrary string (same behavior for
        # all valid window names; unknown names still raise)
        w = getattr(np, window)(window_len)
    # Normalized convolution, trimmed back to the original length
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[window_len:-window_len + 1]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def SavGol(y, win=49):
    '''
    Subtracts a second order Savitsky-Golay filter with window size `win`
    and returns the result. This acts as a high pass filter.
    '''
    # Not enough points to fit the filter window: return input unchanged
    if len(y) < win:
        return y
    return y - savgol_filter(y, win, 2) + np.nanmedian(y)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def NumRegressors(npix, pld_order, cross_terms=True):
    '''
    Return the number of regressors for `npix` pixels
    and PLD order `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True`
    '''
    if cross_terms:
        # Multiset coefficient: combinations with repetition per order
        total = sum(comb(npix + k - 1, k)
                    for k in range(1, pld_order + 1))
    else:
        # Without cross-terms each order contributes exactly npix terms
        total = npix * pld_order
    return int(total)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Downbin(x, newsize, axis=0, operation='mean'):
    '''
    Downbins an array to a smaller size.

    :param array_like x: The array to down-bin
    :param int newsize: The new size of the axis along which to down-bin
    :param int axis: The axis to operate on. Default 0
    :param str operation: The operation to perform when down-binning. \
        Default `mean`
    '''
    assert newsize < x.shape[axis], \
        "The new size of the array must be smaller than the current size."
    oldsize = x.shape[axis]
    # Reshape (..., newsize, oldsize // newsize, ...) so each output bin
    # is a contiguous group along a new axis we can reduce over
    newshape = list(x.shape)
    newshape[axis] = newsize
    newshape.insert(axis + 1, oldsize // newsize)
    trim = oldsize % newsize
    if trim:
        # BUGFIX: trim along `axis`, not along the first axis as the
        # original `x[:-trim]` did (that broke any axis != 0 down-bin
        # whose length is not an exact multiple of `newsize`)
        slicer = [slice(None)] * x.ndim
        slicer[axis] = slice(None, oldsize - trim)
        xtrim = x[tuple(slicer)]
    else:
        xtrim = x
    binned = xtrim.reshape(newshape)
    if operation == 'mean':
        xbin = np.nanmean(binned, axis=axis + 1)
    elif operation == 'sum':
        xbin = np.nansum(binned, axis=axis + 1)
    elif operation == 'quadsum':
        xbin = np.sqrt(np.nansum(binned ** 2, axis=axis + 1))
    elif operation == 'median':
        xbin = np.nanmedian(binned, axis=axis + 1)
    else:
        raise ValueError("`operation` must be either `mean`, " +
                         "`sum`, `quadsum`, or `median`.")
    return xbin
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup(var_name, contexts=(), start=0):
    """Look up `var_name` on the stack of contexts.

    Contexts are searched innermost-first. A non-negative `start`
    searches the whole stack; a negative `start` limits the search to
    ``contexts[:start]``.

    :returns: the first value found, or None if not found
    """
    limit = len(contexts) if start >= 0 else start
    for context in reversed(contexts[:limit]):
        try:
            if var_name in context:
                return context[var_name]
        except TypeError:
            # Non-container values may live on the stack; skip them
            continue
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delimiters_to_re(delimiters):
    """convert delimiters to corresponding regular expressions"""
    # Tuples are hashable, so the pair can serve as a cache key
    delimiters = tuple(delimiters)
    try:
        return re_delimiters[delimiters]
    except KeyError:
        open_tag, close_tag = delimiters
        # Escape every non-alphanumeric character in both tags
        open_tag = ''.join(c if c.isalnum() else '\\' + c
                           for c in open_tag)
        close_tag = ''.join(c if c.isalnum() else '\\' + c
                            for c in close_tag)
        re_tag = re.compile(
            open_tag + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)' + close_tag,
            re.DOTALL)
        re_delimiters[delimiters] = re_tag
        return re_tag
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _escape(self, text):
    """Escape `text` according to ``self.escape``."""
    # None renders as the empty string; everything else is stringified
    value = EMPTYSTRING if text is None else str(text)
    return html_escape(value) if self.escape else value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _lookup(self, dot_name, contexts):
    """Lookup value for names like 'a.b.c' and handle filters as well.

    Supports relative context paths ('../../a.b.c'), dotted
    attribute/key/index access, and pipe-separated filters
    ('name | filter1 | filter2').
    """
    # process filters: 'name | f1 | f2' -> dot_name='name', filters=[f1,f2]
    filters = [x for x in map(lambda x: x.strip(), dot_name.split('|'))]
    dot_name = filters[0]
    filters = filters[1:]
    # should support paths like '../../a.b.c/../d', etc.
    if not dot_name.startswith('.'):
        dot_name = './' + dot_name
    paths = dot_name.split('/')
    last_path = paths[-1]
    # path like '../..' or ./../. etc. refers to a context itself,
    # not a variable inside one
    refer_context = last_path == '' or last_path == '.' or last_path == '..'
    paths = paths if refer_context else paths[:-1]
    # count path level
    level = 0
    for path in paths:
        if path == '..':
            level -= 1
        elif path != '.':
            # ../a.b.c/.. in the middle
            level += len(path.strip('.').split('.'))
    names = last_path.split('.')
    # fetch the correct context
    if refer_context or names[0] == '':
        try:
            value = contexts[level-1]
        except:
            # NOTE(review): bare except deliberately swallows any index
            # error so a bad path renders as empty instead of raising
            value = None
    else:
        # support {{a.b.c.d.e}} like lookup
        value = lookup(names[0], contexts, level)
    # lookup for variables: walk the remaining dotted names
    if not refer_context:
        for name in names[1:]:
            try:
                # a.num (a.1, a.2) to access list
                index = parse_int(name)
                name = parse_int(name) if isinstance(value, (list, tuple)) else name
                value = value[name]
            except:
                # not found
                value = None
                break;
    # apply filters; unknown or failing filters are silently skipped
    for f in filters:
        try:
            func = self.root.filters[f]
            value = func(value)
        except:
            continue
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _render_children(self, contexts, partials):
    """Render the children tokens and join them into one string."""
    return EMPTYSTRING.join(child._render(contexts, partials)
                            for child in self.children)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _render(self, contexts, partials):
    """Render an inverted section: children only when the value is falsy."""
    if self._lookup(self.value, contexts):
        return EMPTYSTRING
    return self._render_children(contexts, partials)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Setup():
    '''
    Called when the code is installed. Sets up directories and downloads
    the K2 catalog.
    '''
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern
    os.makedirs(os.path.join(EVEREST_DAT, 'k2', 'cbv'), exist_ok=True)
    GetK2Stars(clobber=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CDPP(flux, mask=None, cadence='lc'):
    '''
    Compute the proxy 6-hr CDPP metric.

    :param array_like flux: The flux array to compute the CDPP for
    :param array_like mask: The indices to be masked. \
        Default :py:obj:`None` (no masking)
    :param str cadence: The light curve cadence. Default `lc`
    '''
    # Avoid the shared mutable-default-argument pitfall of `mask=[]`
    if mask is None:
        mask = []
    # 13 cadences is 6.5 hours
    rmswin = 13
    # Smooth the data on a 2 day timescale
    svgwin = 49
    # If short cadence, need to downbin
    if cadence == 'sc':
        newsize = len(flux) // 30
        flux = Downbin(flux, newsize, operation='mean')
    flux_savgol = SavGol(np.delete(flux, mask), win=svgwin)
    if len(flux_savgol):
        return Scatter(flux_savgol / np.nanmedian(flux_savgol),
                       remove_outliers=True, win=rmswin)
    else:
        return np.nan
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def HasShortCadence(EPIC, season=None):
    '''
    Returns `True` if short cadence data is available for this target.

    :param int EPIC: The EPIC ID number
    :param int season: The campaign number. Default :py:obj:`None`
    '''
    # Resolve the campaign if the caller didn't provide one
    if season is None:
        season = Campaign(EPIC)
    if season is None:
        return None
    # Scan the campaign roster for the first matching EPIC entry;
    # column 3 is the short-cadence flag
    for star in GetK2Campaign(season):
        if star[0] == EPIC:
            return star[3]
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def DVSFile(ID, season, cadence='lc'):
    '''
    Returns the name of the DVS PDF for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param str cadence: The cadence type. Default `lc`
    '''
    # Short-cadence files get an `_sc` suffix; the `llc` stem is fixed
    suffix = '_sc' if cadence == 'sc' else ''
    return ('hlsp_everest_k2_llc_%d-c%02d_kepler_v%s_dvs%s.pdf'
            % (ID, season, EVEREST_MAJOR_MINOR, suffix))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GetTargetCBVs(model):
    '''
    Computes and stores the design matrix of CBVs for the given target
    in ``model.XCBV``.

    :param model: An instance of the :py:obj:`everest` model for the target
    '''
    name = model.name
    # Short-cadence targets reuse the long-cadence CBVs: there aren't
    # enough SC light curves to build a good basis set
    if name.endswith('.sc'):
        name = name[:-3]
    model.XCBV = sysrem.GetCBVs(model.season, model=name,
                                niter=model.cbv_niter,
                                sv_win=model.cbv_win,
                                sv_order=model.cbv_order)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def StatsToCSV(campaign, model='nPLD'):
    '''
    Generate the CSV file used in the search database for the documentation.
    '''
    statsfile = os.path.join(EVEREST_SRC, 'missions', 'k2',
                             'tables', 'c%02d_%s.cdpp' % (campaign, model))
    csvfile = os.path.join(os.path.dirname(EVEREST_SRC), 'docs',
                           'c%02d.csv' % campaign)
    epic, kp, cdpp6r, cdpp6, _, _, _, _, saturated = \
        np.loadtxt(statsfile, unpack=True, skiprows=2)
    # Header line is the campaign tag; one row per target follows
    rows = ['c%02d' % campaign]
    for e, k, c6r, c6, sat in zip(epic, kp, cdpp6r, cdpp6, saturated):
        rows.append('%09d,%.3f,%.3f,%.3f,%d' % (e, k, c6r, c6, int(sat)))
    with open(csvfile, 'w') as f:
        f.write('\n'.join(rows) + '\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_pending_lookups(event, sender, **kwargs):
    """Handle any pending relations to the sending model.
    Sent from class_prepared."""
    meta = sender._meta
    # Pop so each pending callback fires at most once
    for callback in pending_lookups.pop((meta.app_label, meta.name), []):
        callback(sender)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Many2ManyThroughModel(field):
    '''Create a Many2Many through model with two foreign key fields and a
    CompositeFieldId depending on the two foreign keys.'''
    from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField
    name_model = field.model._meta.name
    name_relmodel = field.relmodel._meta.name
    # The two models are the same: disambiguate the second field name
    if name_model == name_relmodel:
        name_relmodel += '2'
    through = field.through
    # Create the through model if the user didn't supply one
    if through is None:
        name = '{0}_{1}'.format(name_model, name_relmodel)

        class Meta:
            app_label = field.model._meta.app_label
        through = ModelType(name, (StdModel,), {'Meta': Meta})
        field.through = through
    # The first field: points back at field.model, reachable from
    # relmodel instances via `field.name`
    field1 = ForeignKey(field.model,
                        related_name=field.name,
                        related_manager_class=makeMany2ManyRelatedManager(
                            field.relmodel,
                            name_model,
                            name_relmodel)
                        )
    field1.register_with_model(name_model, through)
    # The second field: the mirror image of field1
    field2 = ForeignKey(field.relmodel,
                        related_name=field.related_name,
                        related_manager_class=makeMany2ManyRelatedManager(
                            field.model,
                            name_relmodel,
                            name_model)
                        )
    field2.register_with_model(name_relmodel, through)
    # Composite primary key built from the two foreign keys, so each
    # (model, relmodel) pair is stored at most once
    pk = CompositeIdField(name_model, name_relmodel)
    pk.register_with_model('id', through)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel):
    '''Build a related-manager subclass bound to `formodel`.

    :param formodel: the model which the manager is for
    '''
    # Create the subclass with its three class attributes in one step
    attrs = {'formodel': formodel,
             'name_relmodel': name_relmodel,
             'name_formodel': name_formodel}
    return type('_Many2ManyRelatedManager',
                (Many2ManyRelatedManager,), attrs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metaphone_processor(words):
    '''Double metaphone word processor.'''
    for word in words:
        for candidate in double_metaphone(word):
            # Skip empty codes and whitespace-only codes
            stripped = candidate.strip() if candidate else ''
            if stripped:
                yield stripped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tolerant_metaphone_processor(words):
    '''Double metaphone word processor slightly modified so that when no
    words are returned by the algorithm, the original word is returned.'''
    for word in words:
        yielded = 0
        for candidate in double_metaphone(word):
            stripped = candidate.strip() if candidate else ''
            if stripped:
                yielded += 1
                yield stripped
        # Fall back to the raw word if metaphone produced nothing
        if not yielded:
            yield word
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stemming_processor(words):
    '''Porter Stemmer word processor'''
    # Bind the stemmer once, outside the loop
    stem = PorterStemmer().stem
    return (stem(word, 0, len(word) - 1) for word in words)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Pool(pool='AnyPool', **kwargs):
    '''
    Chooses between the different pools.
    If ``pool == 'AnyPool'``, chooses based on availability.
    '''
    if pool == 'AnyPool':
        # Prefer MPI, then multiprocessing, then serial
        if MPIPool.enabled():
            return MPIPool(**kwargs)
        if MultiPool.enabled():
            return MultiPool(**kwargs)
        return SerialPool(**kwargs)
    # Explicit pool selection by name
    pools = {'MPIPool': MPIPool,
             'MultiPool': MultiPool,
             'SerialPool': SerialPool}
    if pool not in pools:
        raise ValueError('Invalid pool ``%s``.' % pool)
    return pools[pool](**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wait(self):
    """
    If this isn't the master process, wait for instructions.

    Runs the worker-side event loop: block on a receive from rank 0,
    act on the message (quit sentinel, function replacement, or a task
    to evaluate), and send results back tagged with the task's tag.
    """
    if self.is_master():
        raise RuntimeError("Master node told to await jobs.")
    status = MPI.Status()
    while True:
        # Event loop.
        # Sit here and await instructions.
        if self.debug:
            print("Worker {0} waiting for task.".format(self.rank))
        # Blocking receive to wait for instructions.
        task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if self.debug:
            print("Worker {0} got task {1} with tag {2}."
                  .format(self.rank, type(task), status.tag))
        # Check if message is special sentinel signaling end.
        # If so, stop.
        if isinstance(task, _close_pool_message):
            if self.debug:
                print("Worker {0} told to quit.".format(self.rank))
            break
        # Check if message is special type containing new function
        # to be applied
        if isinstance(task, _function_wrapper):
            self.function = task.function
            if self.debug:
                print("Worker {0} replaced its task function: {1}."
                      .format(self.rank, self.function))
            continue
        # If not a special message, just run the known function on
        # the input and return it asynchronously (the tag routes the
        # answer back to the right task on the master).
        result = self.function(task)
        if self.debug:
            print("Worker {0} sending answer {1} with tag {2}."
                  .format(self.rank, type(result), status.tag))
        self.comm.isend(result, dest=0, tag=status.tag)
    # Kill the process?
    if self.exit_on_end:
        sys.exit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit_when_no_transaction(f):
    '''Decorator for committing changes when the instance session is
    not in a transaction.'''
    from functools import wraps

    # functools.wraps copies __name__, __doc__, __module__, __qualname__
    # and __dict__ — strictly more than the manual copy it replaces
    @wraps(f)
    def _(self, *args, **kwargs):
        r = f(self, *args, **kwargs)
        # Outside a transaction, register the instance with its session
        return self.session.add(self) if self.session is not None else r
    return _
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def irange(self, start=0, end=-1, callback=None, withscores=True,
           **options):
    '''Return the range by rank between start and end.'''
    backend = self.read_backend
    pending = backend.structure(self).irange(start, end,
                                             withscores=withscores,
                                             **options)
    # Default callback depends on whether scores are included
    if not callback:
        callback = self.load_data if withscores else self.load_values
    return backend.execute(pending, callback)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pop_front(self):
    '''Remove and return the first element of the list.'''
    backend = self.backend
    pending = backend.structure(self).pop_front()
    # Deserialize the raw value via the structure's pickler
    return backend.execute(pending, self.value_pickler.loads)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def block_pop_back(self, timeout=10):
    '''Remove the last element of the list. If no elements are
    available, blocks for at least ``timeout`` seconds.

    Written as a generator-based coroutine: the first ``yield`` hands
    the backend call to the driver, which resumes it with the raw value.
    '''
    value = yield self.backend_structure().block_pop_back(timeout)
    # A None value means the timeout expired with nothing to pop
    if value is not None:
        yield self.value_pickler.loads(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def block_pop_front(self, timeout=10):
    '''Remove the first element of the list. If no elements are
    available, blocks for at least ``timeout`` seconds.

    Written as a generator-based coroutine: the first ``yield`` hands
    the backend call to the driver, which resumes it with the raw value.
    '''
    value = yield self.backend_structure().block_pop_front(timeout)
    # A None value means the timeout expired with nothing to pop
    if value is not None:
        yield self.value_pickler.loads(value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.