text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Scales using the AdvanceMAME Scale2X algorithm which does a
<END_TASK>
<USER_TASK:>
Description:
def scale2x(self, surface):
    """
    Scale *surface* by 2x using the AdvanceMAME Scale2X algorithm,
    giving a 'jaggie-less' enlargement of bitmap graphics.
    """
    # Scale2X only ever doubles, so the configured scale must be 2.
    assert self._scale == 2
    return self._pygame.transform.scale2x(surface)
<SYSTEM_TASK:>
Smooth scaling using MMX or SSE extensions if available
<END_TASK>
<USER_TASK:>
Description:
def smoothscale(self, surface):
    """
    Smoothly scale *surface* to the configured output size, using MMX
    or SSE extensions when pygame makes them available.
    """
    transform = self._pygame.transform
    return transform.smoothscale(surface, self._output_size)
<SYSTEM_TASK:>
Fast scale operation that does not sample the results
<END_TASK>
<USER_TASK:>
Description:
def identity(self, surface):
    """
    Fast scale of *surface* to the output size, without sampling
    (no smoothing of the result).
    """
    plain_scale = self._pygame.transform.scale
    return plain_scale(surface, self._output_size)
<SYSTEM_TASK:>
Converts RGB values to the nearest equivalent xterm-256 color.
<END_TASK>
<USER_TASK:>
Description:
def rgb2short(r, g, b):
    """
    Convert an RGB triple to the nearest equivalent xterm-256 color index.
    """
    # Snap each channel onto the 6x6x6 xterm color cube using the
    # module-level `snaps` threshold list: count thresholds below it.
    red, green, blue = (sum(1 for snap in snaps if snap < channel)
                        for channel in (r, g, b))
    # Cube colors start at index 16; 36/6/1 are the cube axis strides.
    return 36 * red + 6 * green + blue + 16
<SYSTEM_TASK:>
Takes an image, scales it according to the nominated transform, and
<END_TASK>
<USER_TASK:>
Description:
def display(self, image):
    """
    Takes an image, scales it according to the nominated transform, and
    stores it for later building into an animated GIF.

    :param image: PIL image; its size must match the configured size.
    """
    assert(image.size == self.size)
    # keep a reference to the unprocessed frame
    self._last_image = image
    image = self.preprocess(image)
    # render through pygame, then turn the raw RGB bytes back into a
    # PIL image that can be appended to the recorded frame list
    surface = self.to_surface(image, alpha=self._contrast)
    rawbytes = self._pygame.image.tostring(surface, "RGB", False)
    im = Image.frombytes("RGB", surface.get_size(), rawbytes)
    self._images.append(im)
    self._count += 1
    logger.debug("Recording frame: {0}".format(self._count))
    # exit the whole process once the frame budget is exhausted
    if self._max_frames and self._count >= self._max_frames:
        sys.exit(0)
<SYSTEM_TASK:>
Count the number of black pixels in a rendered character.
<END_TASK>
<USER_TASK:>
Description:
def _char_density(self, c, font=None):
    """
    Count the number of black pixels in a rendered character.

    :param c: the character to render.
    :param font: PIL font to render with; defaults to the PIL default
        font, loaded lazily per call. (The previous def-time default
        was evaluated once at import time.)
    :return: count of pixels with value 0 in the rendered image.
    """
    if font is None:
        font = ImageFont.load_default()
    image = Image.new('1', font.getsize(c), color=255)
    draw = ImageDraw.Draw(image)
    # NOTE(review): text is drawn "white" on a white-initialised image
    # and zero-valued pixels are then counted -- this relies on PIL's
    # mode '1' rendering details; confirm against actual output.
    draw.text((0, 0), c, fill="white", font=font)
    return collections.Counter(image.getdata())[0]
<SYSTEM_TASK:>
Print message to console, indent format may apply.
<END_TASK>
<USER_TASK:>
Description:
def show(self, msg, indent=0, style="", **kwargs):
    """
    Print *msg* to the console; indentation and a style prefix may apply.
    Does nothing unless verbose output is enabled.
    """
    if not self.enable_verbose:
        return
    formatted = self.MessageTemplate.with_style.format(
        indent=indent * self.tab,
        style=style,
        msg=msg,
    )
    print(formatted, **kwargs)
<SYSTEM_TASK:>
Relink the file handler association you just removed.
<END_TASK>
<USER_TASK:>
Description:
def recover_all_handler(self):
    """
    Re-attach every file handler association that was previously
    removed, then empty the cache.
    """
    cached = self._handler_cache
    for cached_handler in cached:
        self.logger.addHandler(cached_handler)
    self._handler_cache = list()
<SYSTEM_TASK:>
Adds a handler to save to a file. Includes debug stuff.
<END_TASK>
<USER_TASK:>
Description:
def update(self, fname):
    """
    Attach a handler that saves log output (including debug records)
    to the file *fname*.
    """
    file_handler = FileHandler(fname)
    self._log.addHandler(file_handler)
<SYSTEM_TASK:>
Delete a folder recursive.
<END_TASK>
<USER_TASK:>
Description:
def delete_dir_rec(path: Path):
    """
    Recursively delete the folder *path* and everything inside it.
    A no-op when *path* does not exist or is not a directory.

    :param path: folder to be deleted
    :type path: ~pathlib.Path
    """
    if not (path.is_dir() and path.exists()):
        return
    for entry in path.iterdir():
        if entry.is_dir():
            delete_dir_rec(entry)
        else:
            entry.unlink()
    path.rmdir()
<SYSTEM_TASK:>
Prints all registered plugins and checks if they can be loaded or not.
<END_TASK>
<USER_TASK:>
Description:
def print_plugin_list(plugins: Dict[str, pkg_resources.EntryPoint]):
    """
    Print every registered plugin and whether it can be loaded.

    :param plugins: plugins
    :type plugins: Dict[str, ~pkg_resources.EntryPoint]
    """
    for trigger, entry_point in plugins.items():
        try:
            # both the load and the version lookup may fail
            version = str(entry_point.load()._info.version)
        except Exception:
            print(
                f"{trigger} (failed)"
            )
        else:
            print(
                f"{trigger} (ok)\n"
                f"    {version}"
            )
<SYSTEM_TASK:>
Determines whether two windows overlap
<END_TASK>
<USER_TASK:>
Description:
def overlap(xl1, yl1, nx1, ny1, xl2, yl2, nx2, ny2):
    """
    Determine whether two windows overlap.

    Each window is given by its lower-left corner (xl, yl) and its
    extent (nx, ny); overlap requires strict intersection on both axes.
    """
    x_intersects = xl2 < xl1 + nx1 and xl1 < xl2 + nx2
    y_intersects = yl2 < yl1 + ny1 and yl1 < yl2 + ny2
    return x_intersects and y_intersects
<SYSTEM_TASK:>
Saves the current setup to disk.
<END_TASK>
<USER_TASK:>
Description:
def saveJSON(g, data, backup=False):
    """
    Saves the current setup to disk.

    g : hcam_drivers.globals.Container
        Container with globals
    data : dict
        The current setup in JSON compatible dictionary format.
    backup : bool
        If we are saving a backup on close, don't prompt for filename

    Returns True on success, False if the user aborted the file dialog.
    """
    if not backup:
        # interactive save: ask the user where to write the file
        fname = filedialog.asksaveasfilename(
            defaultextension='.json',
            filetypes=[('json files', '.json'), ],
            initialdir=g.cpars['app_directory']
        )
    else:
        # automatic backup on close: fixed location, no prompt
        fname = os.path.join(os.path.expanduser('~/.hdriver'), 'app.json')
    if not fname:
        g.clog.warn('Aborted save to disk')
        return False
    with open(fname, 'w') as of:
        of.write(
            json.dumps(data, sort_keys=True, indent=4,
                       separators=(',', ': '))
        )
    # bug fix: message previously rendered as 'Saved setup to<fname>'
    # (missing trailing space before the filename)
    g.clog.info('Saved setup to ' + fname)
    return True
<SYSTEM_TASK:>
Posts the current setup to the camera and data servers.
<END_TASK>
<USER_TASK:>
Description:
def postJSON(g, data):
    """
    Posts the current setup to the camera and data servers.

    g : hcam_drivers.globals.Container
        Container with globals
    data : dict
        The current setup in JSON compatible dictionary format.

    Returns True when all posts succeed, False when the main server
    response is not OK or (at GTC) the offset server cannot be set up.
    """
    g.clog.debug('Entering postJSON')
    # encode data as json (the comment below said "xml" historically;
    # the payload is JSON)
    json_data = json.dumps(data).encode('utf-8')
    # Send the JSON to the main hipercam server
    url = urllib.parse.urljoin(g.cpars['hipercam_server'], g.SERVER_POST_PATH)
    g.clog.debug('Server URL = ' + url)
    opener = urllib.request.build_opener()
    g.clog.debug('content length = ' + str(len(json_data)))
    req = urllib.request.Request(url, data=json_data, headers={'Content-type': 'application/json'})
    response = opener.open(req, timeout=15).read()
    g.rlog.debug('Server response: ' + response.decode())
    csr = ReadServer(response, status_msg=False)
    if not csr.ok:
        g.clog.warn('Server response was not OK')
        g.rlog.warn('postJSON response: ' + response.decode())
        g.clog.warn('Server error = ' + csr.err)
        return False
    # now try to setup nodding server if appropriate
    if g.cpars['telins_name'] == 'GTC':
        url = urllib.parse.urljoin(g.cpars['gtc_offset_server'], 'setup')
        g.clog.debug('Offset Server URL = ' + url)
        opener = urllib.request.build_opener()
        try:
            # same JSON payload, shorter timeout for the offsetter
            req = urllib.request.Request(url, data=json_data, headers={'Content-type': 'application/json'})
            response = opener.open(req, timeout=5).read().decode()
        except Exception as err:
            g.clog.warn('Could not communicate with GTC offsetter')
            g.clog.warn(str(err))
            return False
        g.rlog.info('Offset Server Response: ' + response)
        if not json.loads(response)['status'] == 'OK':
            g.clog.warn('Offset Server response was not OK')
            return False
    g.clog.debug('Leaving postJSON')
    return True
<SYSTEM_TASK:>
Create JSON compatible dictionary from current settings
<END_TASK>
<USER_TASK:>
Description:
def createJSON(g, full=True):
    """
    Create JSON compatible dictionary from current settings.

    Parameters
    ----------
    g : hcam_drivers.globals.Container
        Container with globals
    full : bool
        When True, also include hardware, TCS and (at GTC) telescope
        header information.

    Returns
    -------
    dict
        Setup dictionary suitable for json.dumps.
    """
    data = dict()
    # default to assuming a GPS is attached when the option is absent
    if 'gps_attached' not in g.cpars:
        data['gps_attached'] = 1
    else:
        data['gps_attached'] = 1 if g.cpars['gps_attached'] else 0
    data['appdata'] = g.ipars.dumpJSON()
    data['user'] = g.rpars.dumpJSON()
    if full:
        data['hardware'] = g.ccd_hw.dumpJSON()
        data['tcs'] = g.info.dumpJSON()
        if g.cpars['telins_name'].lower() == 'gtc' and has_corba:
            try:
                s = get_telescope_server()
                data['gtc_headers'] = dict(
                    create_header_from_telpars(s.getTelescopeParams())
                )
            # bug fix: bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; catch ordinary errors only
            except Exception:
                g.clog.warn('cannot get GTC headers from telescope server')
    return data
<SYSTEM_TASK:>
Uploads a table of TCS data to the servers, which is appended onto a run.
<END_TASK>
<USER_TASK:>
Description:
def insertFITSHDU(g):
    """
    Uploads a table of TCS data to the servers, which is appended onto a run.

    Arguments
    ---------
    g : hcam_drivers.globals.Container
        the Container object of application globals

    Returns True on success, False on any failure.
    """
    if not g.cpars['hcam_server_on']:
        g.clog.warn('insertFITSHDU: servers are not active')
        return False
    run_number = getRunNumber(g)
    tcs_table = g.info.tcs_table
    g.clog.info('Adding TCS table data to run{:04d}.fits'.format(run_number))
    url = g.cpars['hipercam_server'] + 'addhdu'
    try:
        # serialise the table as ECSV and POST it alongside the run name
        fd = StringIO()
        ascii.write(tcs_table, format='ecsv', output=fd)
        files = {'file': fd.getvalue()}
        r = requests.post(url, data={'run': 'run{:04d}.fits'.format(run_number)},
                          files=files)
        fd.close()
        rs = ReadServer(r.content, status_msg=False)
        if rs.ok:
            g.clog.info('Response from server was OK')
            return True
        else:
            g.clog.warn('Response from server was not OK')
            g.clog.warn('Reason: ' + rs.err)
            return False
    except Exception as err:
        g.clog.warn('insertFITSHDU failed')
        g.clog.warn(str(err))
        # bug fix: previously fell off the end here, implicitly
        # returning None; callers expect an explicit boolean
        return False
<SYSTEM_TASK:>
Executes a command by sending it to the rack server
<END_TASK>
<USER_TASK:>
Description:
def execCommand(g, command, timeout=10):
    """
    Executes a command by sending it to the rack server.

    Arguments:
      g : hcam_drivers.globals.Container
        the Container object of application globals
      command : (string)
        the command (see below)
      timeout : (int)
        seconds to wait for the server to respond

    Possible commands are:
      start   : starts a run
      stop    : stops a run
      abort   : aborts a run
      online  : bring ESO control server online and power up hardware
      off     : put ESO control server in idle state and power down
      standby : server can communicate, but child processes disabled
      reset   : resets the NGC controller front end

    Returns True/False according to whether the command
    succeeded or not.
    """
    if not g.cpars['hcam_server_on']:
        g.clog.warn('execCommand: servers are not active')
        return False
    try:
        # the command is simply appended to the server's base URL
        url = g.cpars['hipercam_server'] + command
        g.clog.info('execCommand, command = "' + command + '"')
        response = urllib.request.urlopen(url, timeout=timeout)
        rs = ReadServer(response.read(), status_msg=False)
        g.rlog.info('Server response =\n' + rs.resp())
        if rs.ok:
            g.clog.info('Response from server was OK')
            return True
        else:
            g.clog.warn('Response from server was not OK')
            g.clog.warn('Reason: ' + rs.err)
            return False
    except urllib.error.URLError as err:
        g.clog.warn('execCommand failed')
        g.clog.warn(str(err))
        return False
<SYSTEM_TASK:>
Polls the data server to see if a run is active
<END_TASK>
<USER_TASK:>
Description:
def isRunActive(g):
    """
    Poll the data server and report whether a run is active.

    Returns True for an 'active' state, False for 'idle'; raises
    DriverError when the servers are off, the response is not OK,
    or the state is unrecognised.
    """
    if not g.cpars['hcam_server_on']:
        raise DriverError('isRunActive error: servers are not active')
    url = g.cpars['hipercam_server'] + 'summary'
    response = urllib.request.urlopen(url, timeout=2)
    rs = ReadServer(response.read(), status_msg=True)
    if not rs.ok:
        raise DriverError('isRunActive error: ' + str(rs.err))
    if rs.state == 'idle':
        return False
    if rs.state == 'active':
        return True
    raise DriverError('isRunActive error, state = ' + rs.state)
<SYSTEM_TASK:>
Polls the data server to find the current frame number.
<END_TASK>
<USER_TASK:>
Description:
def getFrameNumber(g):
    """
    Polls the data server to find the current frame number.

    Throws an exception (DriverError) if it cannot determine it.
    """
    if not g.cpars['hcam_server_on']:
        # bug fix: this message previously said 'getRunNumber error'
        raise DriverError('getFrameNumber error: servers are not active')
    url = g.cpars['hipercam_server'] + 'status/DET.FRAM2.NO'
    response = urllib.request.urlopen(url, timeout=2)
    rs = ReadServer(response.read(), status_msg=False)
    try:
        msg = rs.msg
    # narrowed from a bare `except:`, which also caught SystemExit etc.
    except Exception:
        raise DriverError('getFrameNumber error: no message found')
    try:
        # expected message form: "<keyword> <frame number> ..."
        frame_no = int(msg.split()[1])
    except Exception:
        raise DriverError('getFrameNumber error: invalid msg ' + msg)
    return frame_no
<SYSTEM_TASK:>
Polls the data server to find the current run number. Throws
<END_TASK>
<USER_TASK:>
Description:
def getRunNumber(g):
    """
    Poll the data server for the current run number.

    Raises DriverError when the servers are inactive or the response
    is not OK.
    """
    if not g.cpars['hcam_server_on']:
        raise DriverError('getRunNumber error: servers are not active')
    url = g.cpars['hipercam_server'] + 'summary'
    reply = urllib.request.urlopen(url, timeout=2).read()
    rs = ReadServer(reply, status_msg=True)
    if not rs.ok:
        raise DriverError('getRunNumber error: ' + str(rs.err))
    return rs.run
<SYSTEM_TASK:>
Sends off a request to Simbad to check whether a target is recognised.
<END_TASK>
<USER_TASK:>
Description:
def checkSimbad(g, target, maxobj=5, timeout=5):
    """
    Sends off a request to Simbad to check whether a target is recognised.
    Returns with a list of results, or raises an exception if it times out.

    Each result is a dict with 'Name', 'Position' and 'Frame' keys.
    """
    url = 'http://simbad.u-strasbg.fr/simbad/sim-script'
    # sim-script query: limit the object count and format each match as
    # "Target: <id> | <ICRS coords>"
    q = 'set limit ' + str(maxobj) + \
        '\nformat object form1 "Target: %IDLIST(1) | %COO(A D;ICRS)"\nquery ' \
        + target
    query = urllib.parse.urlencode({'submit': 'submit script', 'script': q})
    # third positional argument of urlopen is the timeout in seconds
    resp = urllib.request.urlopen(url, query.encode(), timeout)
    data = False
    error = False
    results = []
    for line in resp:
        line = line.decode()
        # the reply contains '::data::' / '::error::' section markers
        if line.startswith('::data::'):
            data = True
        if line.startswith('::error::'):
            error = True
        if data and line.startswith('Target:'):
            name, coords = line[7:].split(' | ')
            results.append(
                {'Name': name.strip(), 'Position': coords.strip(),
                 'Frame': 'ICRS'})
    resp.close()
    if error and len(results):
        g.clog.warn('drivers.check: Simbad: there appear to be some ' +
                    'results but an error was unexpectedly raised.')
    return results
<SYSTEM_TASK:>
Version of run that traps Exceptions and stores
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """
    Version of run that traps Exceptions and stores
    them in the fifo.

    Runs the normal Thread body; on any exception the formatted error
    and traceback are queued as (name, error, traceback) for the main
    thread to report, instead of dying silently.
    """
    try:
        threading.Thread.run(self)
    except Exception:
        t, v, tb = sys.exc_info()
        # one-line summary of the exception (trailing newline stripped)
        error = traceback.format_exception_only(t, v)[0][:-1]
        tback = (self.name + ' Traceback (most recent call last):\n' +
                 ''.join(traceback.format_tb(tb)))
        self.fifo.put((self.name, error, tback))
<SYSTEM_TASK:>
Simple method to access root for a widget
<END_TASK>
<USER_TASK:>
Description:
def get_root(w):
    """
    Return the top-level (root) widget of *w* by walking up the
    chain of .master links until there is no parent.
    """
    widget = w
    while widget.master:
        widget = widget.master
    return widget
<SYSTEM_TASK:>
Run a plugin so use the download routine and clean up after.
<END_TASK>
<USER_TASK:>
Description:
def run(plugin_name: str, options: List[str] = None) -> PluginState:
    """
    Run a plugin so use the download routine and clean up after.

    :param plugin_name: name of plugin
    :type plugin_name: str
    :param options: parameters which will be send to the plugin initialization
    :type options: List[str]
    :return: success
    :rtype: ~unidown.plugin.plugin_state.PluginState
    """
    # None default avoids the mutable-default-argument pitfall
    if options is None:
        options = []
    if plugin_name not in dynamic_data.AVAIL_PLUGINS:
        msg = 'Plugin ' + plugin_name + ' was not found.'
        logging.error(msg)
        print(msg)
        return PluginState.NOT_FOUND
    try:
        plugin_class = dynamic_data.AVAIL_PLUGINS[plugin_name].load()
        plugin = plugin_class(options)
    except Exception:
        msg = 'Plugin ' + plugin_name + ' crashed while loading.'
        logging.exception(msg)
        print(msg + ' Check log for more information.')
        return PluginState.LOAD_CRASH
    else:
        logging.info('Loaded plugin: ' + plugin_name)
    try:
        # the actual work: download everything, then tidy up
        download_from_plugin(plugin)
        plugin.clean_up()
    except PluginException as ex:
        # controlled failure raised by the plugin itself
        msg = f"Plugin {plugin.name} stopped working. Reason: {'unknown' if (ex.msg == '') else ex.msg}"
        logging.error(msg)
        print(msg)
        return PluginState.RUN_FAIL
    except Exception:
        # anything else is an uncontrolled crash
        msg = 'Plugin ' + plugin.name + ' crashed.'
        logging.exception(msg)
        print(msg + ' Check log for more information.')
        return PluginState.RUN_CRASH
    else:
        logging.info(plugin.name + ' ends without errors.')
        return PluginState.END_SUCCESS
<SYSTEM_TASK:>
Download the version tag from remote.
<END_TASK>
<USER_TASK:>
Description:
def get_newest_app_version() -> Version:
    """
    Download the version tag from remote.

    :return: version from remote
    :rtype: ~packaging.version.Version
    """
    with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
        pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode('utf-8')
    # 'releases' maps version strings to file lists; iterating yields the
    # version strings (the [] default simply makes the loop a no-op)
    releases = json.loads(pypi_json).get('releases', [])
    online_version = Version('0.0.0')
    for release in releases:
        cur_version = Version(release)
        # ignore pre-releases; keep the highest stable version seen
        if not cur_version.is_prerelease:
            online_version = max(online_version, cur_version)
    return online_version
<SYSTEM_TASK:>
Freeze all settings so they cannot be altered
<END_TASK>
<USER_TASK:>
Description:
def freeze(self):
    """
    Freeze every settings widget so that nothing can be altered.
    """
    simple_widgets = (self.app, self.clear, self.nod, self.led,
                      self.dummy, self.readSpeed, self.expose,
                      self.number)
    for widget in simple_widgets:
        widget.disable()
    self.wframe.disable(everything=True)
    self.nmult.disable()
    self.frozen = True
<SYSTEM_TASK:>
Returns a string suitable to sending off to rtplot when
<END_TASK>
<USER_TASK:>
Description:
def getRtplotWins(self):
    """
    Returns a string suitable to sending off to rtplot when
    it asks for window parameters. Returns null string '' if
    the windows are not OK. This operates on the basis of
    trying to send something back, even if it might not be
    OK as a window setup. Note that we have to take care
    here not to update any GUI components because this is
    called outside of the main thread.
    """
    try:
        if self.isFF():
            return 'fullframe\r\n'
        elif self.isDrift():
            # drift mode: windows come in left/right pairs
            xbin = self.wframe.xbin.value()
            ybin = self.wframe.ybin.value()
            nwin = 2*self.wframe.npair.value()
            ret = str(xbin) + ' ' + str(ybin) + ' ' + str(nwin) + '\r\n'
            for xsl, xsr, ys, nx, ny in self.wframe:
                ret += '{:d} {:d} {:d} {:d}\r\n'.format(
                    xsl, ys, nx, ny
                )
                # NOTE(review): this right-hand window line has no
                # trailing '\r\n', so with more than one pair the next
                # window is appended on the same line -- confirm whether
                # that is what rtplot expects.
                ret += '{:d} {:d} {:d} {:d}'.format(
                    xsr, ys, nx, ny
                )
            return ret
        else:
            # normal mode: windows come in quads, one per quadrant;
            # the upper quadrants are mirrored about row 1025
            xbin = self.wframe.xbin.value()
            ybin = self.wframe.ybin.value()
            nwin = 4*self.wframe.nquad.value()
            ret = str(xbin) + ' ' + str(ybin) + ' ' + str(nwin) + '\r\n'
            for xsll, xsul, xslr, xsur, ys, nx, ny in self.wframe:
                ret += '{:d} {:d} {:d} {:d}\r\n'.format(
                    xsll, ys, nx, ny
                )
                ret += '{:d} {:d} {:d} {:d}\r\n'.format(
                    xsul, 1025 - ys - ny, nx, ny
                )
                ret += '{:d} {:d} {:d} {:d}\r\n'.format(
                    xslr, ys, nx, ny
                )
                ret += '{:d} {:d} {:d} {:d}\r\n'.format(
                    xsur, 1025 - ys - ny, nx, ny
                )
            return ret
    # deliberately swallow everything: caller treats '' as "not OK"
    except:
        return ''
<SYSTEM_TASK:>
Freeze all settings so that they can't be altered
<END_TASK>
<USER_TASK:>
Description:
def freeze(self):
    """
    Freeze all run-parameter widgets so they cannot be altered.
    """
    self.target.disable()
    for entry in (self.filter, self.prog_ob, self.pi,
                  self.observers, self.comment):
        entry.configure(state='disable')
<SYSTEM_TASK:>
Unfreeze all settings so that they can be altered
<END_TASK>
<USER_TASK:>
Description:
def unfreeze(self):
    """
    Unfreeze all settings so that they can be altered. The program/OB
    and PI fields are only re-enabled for data or technical run types.
    """
    g = get_root(self).globals
    self.filter.configure(state='normal')
    run_type = g.observe.rtype()
    if run_type in ('data caution', 'data', 'technical'):
        self.prog_ob.configure(state='normal')
        self.pi.configure(state='normal')
    self.target.enable()
    self.observers.configure(state='normal')
    self.comment.configure(state='normal')
<SYSTEM_TASK:>
Updates values after first checking instrument parameters are OK.
<END_TASK>
<USER_TASK:>
Description:
def checkUpdate(self, *args):
    """
    Updates values after first checking instrument parameters are OK.
    This is not integrated within update to prevent infinite recursion
    since update gets called from ipars.

    Returns False when either check fails; returns None (falsy)
    implicitly when both checks pass.
    """
    # NOTE(review): despite the name/docstring, no update() call is made
    # here when the checks pass -- confirm against callers.
    g = get_root(self).globals
    if not self.check():
        g.clog.warn('Current observing parameters are not valid.')
        return False
    if not g.ipars.check():
        g.clog.warn('Current instrument parameters are not valid.')
        return False
<SYSTEM_TASK:>
Updates values. You should run a check on the instrument and
<END_TASK>
<USER_TASK:>
Description:
def update(self, *args):
    """
    Updates values. You should run a check on the instrument and
    target parameters before calling this.

    Recomputes timing and count estimates from the instrument
    parameters and refreshes the cadence/exposure/duty/peak/total/SN
    display widgets, choosing a number format appropriate to the
    magnitude of each time.
    """
    g = get_root(self).globals
    expTime, deadTime, cycleTime, dutyCycle, frameRate = g.ipars.timing()
    total, peak, peakSat, peakWarn, ston, ston3 = \
        self.counts(expTime, cycleTime)
    # pick a display precision matched to the size of the cycle time
    if cycleTime < 0.01:
        self.cadence.config(text='{0:7.5f} s'.format(cycleTime))
    elif cycleTime < 0.1:
        self.cadence.config(text='{0:6.4f} s'.format(cycleTime))
    elif cycleTime < 1.:
        self.cadence.config(text='{0:5.3f} s'.format(cycleTime))
    elif cycleTime < 10.:
        self.cadence.config(text='{0:4.2f} s'.format(cycleTime))
    elif cycleTime < 100.:
        self.cadence.config(text='{0:4.1f} s'.format(cycleTime))
    elif cycleTime < 1000.:
        self.cadence.config(text='{0:4.0f} s'.format(cycleTime))
    else:
        self.cadence.config(text='{0:5.0f} s'.format(cycleTime))
    # same formatting ladder for the exposure time
    if expTime < 0.01:
        self.exposure.config(text='{0:7.5f} s'.format(expTime))
    elif expTime < 0.1:
        self.exposure.config(text='{0:6.4f} s'.format(expTime))
    elif expTime < 1.:
        self.exposure.config(text='{0:5.3f} s'.format(expTime))
    elif expTime < 10.:
        self.exposure.config(text='{0:4.2f} s'.format(expTime))
    elif expTime < 100.:
        self.exposure.config(text='{0:4.1f} s'.format(expTime))
    elif expTime < 1000.:
        self.exposure.config(text='{0:4.0f} s'.format(expTime))
    else:
        self.exposure.config(text='{0:5.0f} s'.format(expTime))
    self.duty.config(text='{0:4.1f} %'.format(dutyCycle))
    self.peak.config(text='{0:d} cts'.format(int(round(peak))))
    # colour the peak-counts field by saturation/warning status
    if peakSat:
        self.peak.config(bg=g.COL['error'])
    elif peakWarn:
        self.peak.config(bg=g.COL['warn'])
    else:
        self.peak.config(bg=g.COL['main'])
    self.total.config(text='{0:d} cts'.format(int(round(total))))
    self.ston.config(text='{0:.1f}'.format(ston))
    self.ston3.config(text='{0:.1f}'.format(ston3))
<SYSTEM_TASK:>
Disable the button, if in non-expert mode.
<END_TASK>
<USER_TASK:>
Description:
def disable(self):
    """
    Disable the button, if in non-expert mode; the background colour
    reflects whether expert mode is on.
    """
    w.ActButton.disable(self)
    g = get_root(self).globals
    colour = g.COL['start'] if self._expert else g.COL['startD']
    self.config(bg=colour)
<SYSTEM_TASK:>
Turns on 'expert' status whereby the button is always enabled,
<END_TASK>
<USER_TASK:>
Description:
def setExpert(self):
    """
    Turn on 'expert' status: the button stays enabled regardless of
    its activity status, and is coloured accordingly.
    """
    w.ActButton.setExpert(self)
    root_globals = get_root(self).globals
    self.config(bg=root_globals.COL['start'])
<SYSTEM_TASK:>
Carries out the action associated with the Load button
<END_TASK>
<USER_TASK:>
Description:
def act(self):
    """
    Carries out the action associated with the Load button.

    Prompts for a .json or .fits file, extracts the JSON setup from it
    and loads the instrument and run parameters. Returns True on
    success, False when the dialog is cancelled.
    """
    g = get_root(self).globals
    fname = filedialog.askopenfilename(
        defaultextension='.json',
        filetypes=[('json files', '.json'), ('fits files', '.fits')],
        initialdir=g.cpars['app_directory'])
    if not fname:
        g.clog.warn('Aborted load from disk')
        return False
    # load json
    if fname.endswith('.json'):
        with open(fname) as ifname:
            json_string = ifname.read()
    else:
        # FITS file: the JSON setup is embedded in the file
        json_string = jsonFromFits(fname)
    # load up the instrument settings
    g.ipars.loadJSON(json_string)
    # load up the run parameters
    g.rpars.loadJSON(json_string)
    return True
<SYSTEM_TASK:>
Carries out the action associated with the Save button
<END_TASK>
<USER_TASK:>
Description:
def act(self):
    """
    Carries out the action associated with the Save button.

    Validates the instrument and run parameters, saves the current
    setup to disk and, on success, re-enables loading and unfreezes
    the parameter panels. Returns True on success, False otherwise.
    """
    g = get_root(self).globals
    g.clog.info('\nSaving current application to disk')
    # check instrument parameters
    if not g.ipars.check():
        g.clog.warn('Invalid instrument parameters; save failed.')
        return False
    # check run parameters
    rok, msg = g.rpars.check()
    if not rok:
        g.clog.warn('Invalid run parameters; save failed.')
        g.clog.warn(msg)
        return False
    # Get data to save (full=False: no hardware/TCS info)
    data = createJSON(g, full=False)
    # Save to disk
    if saveJSON(g, data):
        # modify buttons
        g.observe.load.enable()
        g.observe.unfreeze.disable()
        # unfreeze the instrument and run params
        g.ipars.unfreeze()
        g.rpars.unfreeze()
        return True
    else:
        return False
<SYSTEM_TASK:>
Carries out the action associated with the Unfreeze button
<END_TASK>
<USER_TASK:>
Description:
def act(self):
    """
    Carry out the action associated with the Unfreeze button:
    unfreeze the instrument and run parameters, re-enable loading,
    then disable this button.
    """
    g = get_root(self).globals
    for pars in (g.ipars, g.rpars):
        pars.unfreeze()
    g.observe.load.enable()
    self.disable()
<SYSTEM_TASK:>
Returns the fully qualified name of the class-under-construction, if possible,
<END_TASK>
<USER_TASK:>
Description:
def qualname(self) -> str:
    """
    Return the fully qualified name of the class-under-construction
    when a module is known, otherwise just the bare class name.
    """
    if not self.module:
        return self.name
    return self.module + '.' + self.name
<SYSTEM_TASK:>
Returns the value for ``self.name`` given the class-under-construction's class
<END_TASK>
<USER_TASK:>
Description:
def get_value(self, Meta: Type[object], base_classes_meta, mcs_args: McsArgs) -> Any:
    """
    Returns the value for ``self.name`` given the class-under-construction's class
    ``Meta``. If it's not found there, and ``self.inherit == True`` and there is a
    base class that has a class ``Meta``, use that value, otherwise ``self.default``.

    :param Meta: the class ``Meta`` (if any) from the class-under-construction
                 (**NOTE:** this will be an ``object`` or ``None``, NOT an instance
                 of :class:`MetaOptionsFactory`)
    :param base_classes_meta: the :class:`MetaOptionsFactory` instance (if any) from
                              the base class of the class-under-construction
    :param mcs_args: the :class:`McsArgs` for the class-under-construction
    """
    # precedence (lowest to highest): default, inherited base Meta, own Meta
    sources = [base_classes_meta if self.inherit else None, Meta]
    value = self.default
    for source in sources:
        if source is not None:
            value = getattr(source, self.name, value)
    return value
<SYSTEM_TASK:>
Avoid repeated trigger of callback.
<END_TASK>
<USER_TASK:>
Description:
def on_key_release_repeat(self, *dummy):
    """
    Avoid repeated trigger of callback.

    When holding a key down, multiple key press and release events
    are fired in succession. Debouncing is implemented to squash these:
    the real handler is deferred with after_idle and the returned id is
    stored so it can be cancelled.
    """
    # NOTE(review): presumably a matching key-press handler cancels
    # self.has_prev_key_release -- confirm against the caller.
    self.has_prev_key_release = self.after_idle(self.on_key_release, dummy)
<SYSTEM_TASK:>
Subtracts num from the current value
<END_TASK>
<USER_TASK:>
Description:
def sub(self, num):
    """
    Subtract *num* from the current value, clamping the result at zero.
    """
    try:
        val = self.value() - num
    # narrowed from a bare `except:`, which also swallowed SystemExit
    # and KeyboardInterrupt; an unreadable value is treated as zero
    except Exception:
        val = -num
    self.set(max(0, val))
<SYSTEM_TASK:>
Adds num to the current value, jumping up the next
<END_TASK>
<USER_TASK:>
Description:
def add(self, num):
    """
    Add *num* to the current value, rounding up (or down for negative
    *num*) to the next multiple of the mfac widget's value when the
    result is not already a multiple, then clamp to [min, max].
    """
    try:
        val = self.value() + num
    # narrowed from a bare `except:`; unreadable value starts from num
    except Exception:
        val = num
    chunk = self.mfac.value()
    if val % chunk > 0:
        if num > 0:
            val = chunk * (val // chunk + 1)
        elif num < 0:
            val = chunk * (val // chunk)
    val = max(self._min(), min(self._max(), val))
    self.set(val)
<SYSTEM_TASK:>
Sets current value to num
<END_TASK>
<USER_TASK:>
Description:
def set(self, num):
    """
    Sets current value to num.

    Only takes effect when *num* passes validation; the index into the
    allowed-values list is updated before delegating to the base class.
    """
    if self.validate(num) is not None:
        # num must be a member of self.allowed; ValueError otherwise
        self.index = self.allowed.index(num)
    IntegerEntry.set(self, num)
<SYSTEM_TASK:>
This prevents setting any value more precise than 0.00001
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value):
    """
    Reject any value more precise than 0.00001, outside the
    [fmin, fmax] range (zero always allowed), blank, or non-numeric.
    Returns the value unchanged when valid, None otherwise.
    """
    # blank fields are invalid
    if not value:
        return None
    try:
        v = float(value)
    except ValueError:
        return None
    out_of_range = v > self.fmax or (v != 0 and v < self.fmin)
    if out_of_range:
        return None
    # anything finer than 5 decimal places is rejected
    if abs(round(100000 * v) - 100000 * v) > 1.e-12:
        return None
    return value
<SYSTEM_TASK:>
Disable the button, if in non-expert mode;
<END_TASK>
<USER_TASK:>
Description:
def disable(self):
    """
    Disable the button unless in expert mode; the activity flag is
    cleared come-what-may.
    """
    self._active = False
    if self._expert:
        return
    self.config(state='disable')
<SYSTEM_TASK:>
Turns off 'expert' status whereby to allow a button to be disabled
<END_TASK>
<USER_TASK:>
Description:
def setNonExpert(self):
    """
    Turn off 'expert' status so the button can be disabled, then
    refresh the widget state from the current activity flag.
    """
    self._expert = False
    refresh = self.enable if self._active else self.disable
    refresh()
<SYSTEM_TASK:>
Adds an angle to the value
<END_TASK>
<USER_TASK:>
Description:
def add(self, quantity):
    """
    Add an angle *quantity* to the stored value and update the widget
    with the result in degrees.
    """
    self.set((self._value + quantity).deg)
<SYSTEM_TASK:>
Subtracts an angle from the value
<END_TASK>
<USER_TASK:>
Description:
def sub(self, quantity):
    """
    Subtract an angle *quantity* from the stored value and update the
    widget with the result in degrees.
    """
    self.set((self._value - quantity).deg)
<SYSTEM_TASK:>
Carries out the action associated with Stop button
<END_TASK>
<USER_TASK:>
Description:
def act(self):
    """
    Carries out the action associated with Stop button.

    Stops the exposure meter, then aborts the run on the server in a
    background thread (aborting can take a while and would freeze the
    GUI), and schedules self.check to poll the outcome.
    """
    g = get_root(self).globals
    g.clog.debug('Stop pressed')
    # Stop exposure meter
    # do this first, so timer doesn't also try to enable idle mode
    g.info.timer.stop()
    def stop_in_background():
        # sets self.stopping while in flight and self.stopped_ok with
        # the final outcome; read by self.check
        try:
            self.stopping = True
            if execCommand(g, 'abort'):
                self.stopped_ok = True
            else:
                g.clog.warn('Failed to stop run')
                self.stopped_ok = False
            self.stopping = False
        except Exception as err:
            g.clog.warn('Failed to stop run. Error = ' + str(err))
            self.stopping = False
            self.stopped_ok = False
    # stopping can take a while during which the GUI freezes so run in
    # background.
    t = threading.Thread(target=stop_in_background)
    t.daemon = True
    t.start()
    # poll for completion after 500 ms
    self.after(500, self.check)
<SYSTEM_TASK:>
Checks the status of the stop exposure command
<END_TASK>
<USER_TASK:>
Description:
def check(self):
    """
    Checks the status of the stop exposure command.

    Re-schedules itself while the stop is still in flight; on success
    it saves the FITS table, re-enables the relevant buttons, puts the
    chips into idle mode and stops any GTC dithering. Returns True once
    the run has stopped cleanly, False otherwise.
    """
    g = get_root(self).globals
    if self.stopped_ok:
        # Exposure stopped OK; modify buttons
        self.disable()
        # try and write FITS table before enabling start button, otherwise
        # a new start will clear table
        try:
            insertFITSHDU(g)
        except Exception as err:
            g.clog.warn('Could not add FITS Table to run')
            g.clog.warn(str(err))
        g.observe.start.enable()
        g.setup.powerOn.disable()
        g.setup.powerOff.enable()
        # Report that run has stopped
        g.clog.info('Run stopped')
        # enable idle mode now run has stopped
        g.clog.info('Setting chips to idle')
        idle = {'appdata': {'app': 'Idle'}}
        try:
            success = postJSON(g, idle)
            if not success:
                raise Exception('postJSON returned false')
        except Exception as err:
            g.clog.warn('Failed to enable idle mode')
            g.clog.warn(str(err))
        # bug fix: message previously read '(if running' with no
        # closing parenthesis
        g.clog.info('Stopping offsets (if running)')
        try:
            success = stopNodding(g)
            if not success:
                raise Exception('Failed to stop dithering: response was false')
        except Exception as err:
            g.clog.warn('Failed to stop GTC offset script')
            g.clog.warn(str(err))
        return True
    elif self.stopping:
        # Exposure in process of stopping
        # Disable lots of buttons
        self.disable()
        g.observe.start.disable()
        g.setup.powerOn.disable()
        g.setup.powerOff.disable()
        # wait a second before trying again
        self.after(500, self.check)
    else:
        # stop failed: leave this button usable and restart the meter
        self.enable()
        g.observe.start.disable()
        g.setup.powerOn.disable()
        g.setup.powerOff.disable()
        # Start exposure meter
        g.info.timer.start()
        return False
<SYSTEM_TASK:>
Switches colour of verify button
<END_TASK>
<USER_TASK:>
Description:
def modver(self, *args):
    """
    Switches colour of verify button.

    Green for a target known to be in Simbad, red for a known failure,
    neutral when untried; the button is disabled entirely while the
    target name itself is invalid.
    """
    g = get_root(self).globals
    if self.ok():
        tname = self.val.get()
        if tname in self.successes:
            # known to be in simbad
            self.verify.config(bg=g.COL['start'])
        elif tname in self.failures:
            # known not to be in simbad
            self.verify.config(bg=g.COL['stop'])
        else:
            # not known whether in simbad
            self.verify.config(bg=g.COL['main'])
        self.verify.config(state='normal')
    else:
        # invalid target name: neutral colour and disabled
        self.verify.config(bg=g.COL['main'])
        self.verify.config(state='disable')
    if self.callback is not None:
        self.callback()
<SYSTEM_TASK:>
Carries out the action associated with Verify button
<END_TASK>
<USER_TASK:>
Description:
def act(self):
    """
    Carries out the action associated with Verify button.

    Looks the current target name up in Simbad, colours the button
    according to the outcome and caches the name in the successes or
    failures list so repeat lookups are unnecessary.
    """
    tname = self.val.get()
    g = get_root(self).globals
    g.clog.info('Checking ' + tname + ' in simbad')
    try:
        ret = checkSimbad(g, tname)
        if len(ret) == 0:
            # no match: mark red and remember the failure
            self.verify.config(bg=g.COL['stop'])
            g.clog.warn('No matches to "' + tname + '" found.')
            if tname not in self.failures:
                self.failures.append(tname)
        elif len(ret) == 1:
            # unique match: mark green and remember the success
            self.verify.config(bg=g.COL['start'])
            g.clog.info(tname + ' verified OK in simbad')
            g.clog.info('Primary simbad name = ' + ret[0]['Name'])
            if tname not in self.successes:
                self.successes.append(tname)
        else:
            # ambiguous: treated as a failure
            g.clog.warn('More than one match to "' + tname + '" found')
            self.verify.config(bg=g.COL['stop'])
            if tname not in self.failures:
                self.failures.append(tname)
    except urllib.error.URLError:
        g.clog.warn('Simbad lookup timed out')
    except socket.timeout:
        g.clog.warn('Simbad lookup timed out')
<SYSTEM_TASK:>
Modifies widget according to expertise level, which in this
<END_TASK>
<USER_TASK:>
Description:
def setExpertLevel(self):
    """
    Modifies widget according to expertise level, which in this
    case is just matter of hiding or revealing the button to
    set CCD temps
    """
    g = get_root(self).globals
    level = g.cpars['expert_level']
    if level == 0:
        # non-expert: hide the CCD TECs pane; if it was the current
        # selection, switch back to the Observe pane first
        if self.val.get() == 'CCD TECs':
            self.val.set('Observe')
            self._changed()
        self.tecs.grid_forget()
    else:
        # expert: reveal the button
        self.tecs.grid(row=0, column=3, sticky=tk.W)
<SYSTEM_TASK:>
Starts the timer from zero
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """
    Starts the timer from zero

    Records the wall-clock start time and resets the displayed elapsed
    time to "0 s".
    """
    self.startTime = time.time()
    self.configure(text='{0:<d} s'.format(0))
    self.update()
<SYSTEM_TASK:>
Periodically update a table of info from the TCS.
<END_TASK>
<USER_TASK:>
Description:
def update_tcs_table(self):
    """
    Periodically update a table of info from the TCS.
    Only works at GTC

    Re-schedules itself every 60 s via Tk's ``after``; failures to
    contact the telescope server are logged and the polling continues.
    """
    g = get_root(self).globals
    # do nothing unless the TCS is enabled and we are at the GTC
    if not g.cpars['tcs_on'] or not g.cpars['telins_name'].lower() == 'gtc':
        self.after(60000, self.update_tcs_table)
        return
    try:
        tel_server = tcs.get_telescope_server()
        telpars = tel_server.getTelescopeParams()
        add_gtc_header_table_row(self.tcs_table, telpars)
    except Exception as err:
        # best-effort poll: log and carry on, next attempt is in 60 s
        g.clog.warn('Could not update table of TCS info')
    # schedule next call for 60s later
    self.after(60000, self.update_tcs_table)
<SYSTEM_TASK:>
Periodically update TCS info.
<END_TASK>
<USER_TASK:>
Description:
def update_tcs(self):
    """
    Periodically update TCS info.
    A long running process, so run in a thread and fill a queue

    Results arrive on ``self.tcs_data_queue``; errors are forwarded to
    the global ``g.FIFO`` queue. Re-schedules itself every 20 s.
    """
    g = get_root(self).globals
    if not g.cpars['tcs_on']:
        # TCS disabled: poll again later in case it is switched on
        self.after(20000, self.update_tcs)
        return
    # pick the lookup function for the current telescope
    if g.cpars['telins_name'] == 'WHT':
        tcsfunc = tcs.getWhtTcs
    elif g.cpars['telins_name'] == 'GTC':
        tcsfunc = tcs.getGtcTcs
    else:
        g.clog.debug('TCS error: could not recognise ' +
                     g.cpars['telins_name'])
        return
    def tcs_threaded_update():
        # runs in a worker thread; communicate only via thread-safe queues
        try:
            ra, dec, pa, focus = tcsfunc()
            self.tcs_data_queue.put((ra, dec, pa, focus))
        except Exception as err:
            t, v, tb = sys.exc_info()
            error = traceback.format_exception_only(t, v)[0].strip()
            tback = 'TCS Traceback (most recent call last):\n' + \
                ''.join(traceback.format_tb(tb))
            g.FIFO.put(('TCS', error, tback))
    t = threading.Thread(target=tcs_threaded_update)
    t.start()
    self.after(20000, self.update_tcs)
<SYSTEM_TASK:>
Periodically update the slide position.
<END_TASK>
<USER_TASK:>
Description:
def update_slidepos(self):
    """
    Periodically update the slide position.
    Also farmed out to a thread to avoid hanging GUI main thread

    Positions arrive on ``self.slide_pos_queue``; errors are forwarded
    to the global ``g.FIFO`` queue. Re-schedules itself every 20 s.
    """
    g = get_root(self).globals
    if not g.cpars['focal_plane_slide_on']:
        # slide disabled: keep polling in case it is switched on
        self.after(20000, self.update_slidepos)
        return
    def slide_threaded_update():
        # runs in a worker thread; communicate only via thread-safe queues
        try:
            (pos_ms, pos_mm, pos_px), msg = g.fpslide.slide.return_position()
            self.slide_pos_queue.put((pos_ms, pos_mm, pos_px))
        except Exception as err:
            t, v, tb = sys.exc_info()
            error = traceback.format_exception_only(t, v)[0].strip()
            tback = 'Slide Traceback (most recent call last):\n' + \
                ''.join(traceback.format_tb(tb))
            g.FIFO.put(('Slide', error, tback))
    t = threading.Thread(target=slide_threaded_update)
    t.start()
    self.after(20000, self.update_slidepos)
<SYSTEM_TASK:>
Synchronise the settings. This means that the pixel start
<END_TASK>
<USER_TASK:>
Description:
def sync(self):
    """
    Synchronise the settings. This means that the pixel start
    values are shifted downwards so that they are synchronised
    with a full-frame binned version. This does nothing if the
    binning factors == 1.
    """
    # needs some mods for ultracam ??
    xbin = self.xbin.value()
    ybin = self.ybin.value()
    n = 0
    for xsl, xsr, ys, nx, ny in self:
        if xbin > 1:
            # snap left/right window x-starts onto the binned grid;
            # the right half of the chip starts at pixel 1025
            xsl = xbin*((xsl-1)//xbin)+1
            self.xsl[n].set(xsl)
            xsr = xbin*((xsr-1025)//xbin)+1025
            self.xsr[n].set(xsr)
        if ybin > 1:
            ys = ybin*((ys-1)//ybin)+1
            self.ys[n].set(ys)
        n += 1
    g = get_root(self).globals
    # return the sync button to its neutral, disabled state
    self.sbutt.config(bg=g.COL['main'])
    self.sbutt.config(state='disable')
<SYSTEM_TASK:>
Disable all but possibly not binning, which is needed for FF apps
<END_TASK>
<USER_TASK:>
Description:
def disable(self, everything=False):
    """
    Disable all but possibly not binning, which is needed for FF apps

    Parameters
    ----------
    everything : bool
        disable binning as well
    """
    self.freeze()
    if not everything:
        # re-enable the binning widgets so full-frame apps keep them editable
        for binning_widget in (self.xbin, self.ybin):
            binning_widget.enable()
    self.frozen = False
<SYSTEM_TASK:>
Synchronise the settings.
<END_TASK>
<USER_TASK:>
Description:
def sync(self):
    """
    Synchronise the settings.

    This routine changes the window settings so that the pixel start
    values are shifted downwards until they are synchronised with a
    full-frame binned version. This does nothing if the binning factor
    is 1.
    """
    xbin = self.xbin.value()
    ybin = self.ybin.value()
    if xbin == 1 and ybin == 1:
        # nothing to do; just disable the sync button
        self.sbutt.config(state='disable')
        return
    for n, (xsll, xsul, xslr, xsur, ys, nx, ny) in enumerate(self):
        # snap each quadrant x-start onto the binned grid; the right
        # half of the chip is offset by 1024 pixels (starts at 1025)
        if (xsll-1) % xbin != 0:
            xsll = xbin * ((xsll-1)//xbin) + 1
            self.xsll[n].set(xsll)
        if (xsul-1) % xbin != 0:
            xsul = xbin * ((xsul-1)//xbin) + 1
            self.xsul[n].set(xsul)
        if (xslr-1025) % xbin != 0:
            xslr = xbin * ((xslr-1025)//xbin) + 1025
            self.xslr[n].set(xslr)
        if (xsur-1025) % xbin != 0:
            xsur = xbin * ((xsur-1025)//xbin) + 1025
            self.xsur[n].set(xsur)
        if ybin > 1 and (ys-1) % ybin != 0:
            ys = ybin*((ys-1)//ybin)+1
            self.ys[n].set(ys)
    # BUG FIX: 'g' was referenced below without being defined in this
    # scope (sibling sync() methods define it), raising NameError
    # whenever a sync completed.
    g = get_root(self).globals
    self.sbutt.config(bg=g.COL['main'])
    self.sbutt.config(state='disable')
<SYSTEM_TASK:>
Synchronise the settings. This means that the pixel start
<END_TASK>
<USER_TASK:>
Description:
def sync(self, *args):
    """
    Synchronise the settings. This means that the pixel start
    values are shifted downwards so that they are synchronised
    with a full-frame binned version. This does nothing if the
    binning factor == 1. Extra positional args from Tk callbacks
    are ignored.
    """
    xbin = self.xbin.value()
    ybin = self.ybin.value()
    for n, (xs, ys, nx, ny) in enumerate(self):
        if xbin > 1 and xs % xbin != 1:
            # snap onto the binned grid; the right half of the chip
            # is offset by 1024 pixels (starts at 1025)
            if xs < 1025:
                xs = xbin*((xs-1)//xbin)+1
            else:
                xs = xbin*((xs-1025)//xbin)+1025
            self.xs[n].set(xs)
        if ybin > 1 and ys % ybin != 1:
            ys = ybin*((ys-1)//ybin)+1
            self.ys[n].set(ys)
    # BUG FIX: 'g' was referenced below without being defined in this
    # scope (sibling sync() methods define it), raising NameError.
    g = get_root(self).globals
    self.sbutt.config(bg=g.COL['main'])
    self.sbutt.config(state='disable')
<SYSTEM_TASK:>
Freeze all settings so they can't be altered
<END_TASK>
<USER_TASK:>
Description:
def freeze(self):
    """
    Freeze all settings so they can't be altered

    Disables every per-window entry widget plus the global window-count,
    binning and sync controls, then marks the panel as frozen.
    """
    # per-window entries, column by column (truncates to shortest list,
    # exactly as the original zip-based loop did)
    for column in zip(self.xs, self.ys, self.nx, self.ny):
        for widget in column:
            widget.disable()
    # global controls
    for widget in (self.nwin, self.xbin, self.ybin, self.sbutt):
        widget.disable()
    self.frozen = True
<SYSTEM_TASK:>
Check if the download of the given dict was successful. No proving if the content of the file is correct too.
<END_TASK>
<USER_TASK:>
Description:
def check_download(self, link_item_dict: Dict[str, LinkItem], folder: Path, log: bool = True) -> Tuple[
        Dict[str, LinkItem], Dict[str, LinkItem]]:
    """
    Check if the download of the given dict was successful. No proving if the content of the file is correct too.

    :param link_item_dict: dict which to check
    :type link_item_dict: Dict[str, ~unidown.plugin.link_item.LinkItem]
    :param folder: folder where the downloads are saved
    :type folder: ~pathlib.Path
    :param log: if the lost items should be logged
    :type log: bool
    :return: succeeded and lost dicts
    :rtype: Tuple[Dict[str, ~unidown.plugin.link_item.LinkItem], Dict[str, ~unidown.plugin.link_item.LinkItem]]
    """
    succeed = {}
    lost = {}
    # a download counts as succeeded when the target file exists
    for link, item in link_item_dict.items():
        if folder.joinpath(item.name).is_file():
            succeed[link] = item
        else:
            lost[link] = item
    if log and lost:
        for link, item in lost.items():
            self.log.error(f"Not downloaded: {self.info.host+link} - {item.name}")
    return succeed, lost
<SYSTEM_TASK:>
Download the given url to the given target folder.
<END_TASK>
<USER_TASK:>
Description:
def download_as_file(self, url: str, folder: Path, name: str, delay: float = 0) -> str:
    """
    Download the given url to the given target folder.

    :param url: link
    :type url: str
    :param folder: target folder
    :type folder: ~pathlib.Path
    :param name: target file name
    :type name: str
    :param delay: after download wait in seconds
    :type delay: float
    :return: url
    :rtype: str
    :raises ~urllib3.exceptions.HTTPError: if the connection has an error
    """
    # avoid clobbering an existing file: keep appending '_d' until free
    while folder.joinpath(name).exists():  # TODO: handle already existing files
        self.log.warning('already exists: ' + name)
        name = name + '_d'
    # stream the response so the status can be checked before writing
    with self._downloader.request('GET', url, preload_content=False, retries=urllib3.util.retry.Retry(3)) as reader:
        if reader.status == 200:
            with folder.joinpath(name).open(mode='wb') as out_file:
                out_file.write(reader.data)
        else:
            raise HTTPError(f"{url} | {reader.status}")
    # optional politeness delay between downloads
    if delay > 0:
        time.sleep(delay)
    return url
<SYSTEM_TASK:>
Save meta data about the downloaded things and the plugin to file.
<END_TASK>
<USER_TASK:>
Description:
def save_save_state(self, data_dict: Dict[str, LinkItem]):  # TODO: add progressbar
    """
    Save meta data about the downloaded things and the plugin to file.

    :param data_dict: data
    :type data_dict: Dict[link, ~unidown.plugin.link_item.LinkItem]
    """
    # serialise the save state through its protobuf representation to JSON
    json_data = json_format.MessageToJson(self._create_save_state(data_dict).to_protobuf())
    with self._save_state_file.open(mode='w', encoding="utf8") as writer:
        writer.write(json_data)
<SYSTEM_TASK:>
Get links who needs to be downloaded by comparing old and the new data.
<END_TASK>
<USER_TASK:>
Description:
def get_updated_data(self, old_data: Dict[str, LinkItem]) -> Dict[str, LinkItem]:
    """
    Get links who needs to be downloaded by comparing old and the new data.

    :param old_data: old data
    :type old_data: Dict[str, ~unidown.plugin.link_item.LinkItem]
    :return: data which is newer or doesn't exist in the old one
    :rtype: Dict[str, ~unidown.plugin.link_item.LinkItem]
    """
    if not self.download_data:
        return {}
    new_link_item_dict = {}
    for link, link_item in tqdm(self.download_data.items(), desc="Compare with save", unit="item", leave=True,
                                mininterval=1, ncols=100, disable=dynamic_data.DISABLE_TQDM):
        # TODO: add methode to log lost items, which are in old but not in new
        # if link in new_link_item_dict:  # TODO: is ever false, since its the key of a dict: move to the right place
        #     self.log.warning("Duplicate: " + link + " - " + new_link_item_dict[link] + " : " + link_item)
        # if the new_data link does not exists in old_data or new_data time is newer
        if (link not in old_data) or (link_item.time > old_data[link].time):
            new_link_item_dict[link] = link_item
    return new_link_item_dict
<SYSTEM_TASK:>
Convert the option list to a dictionary where the key is the option and the value is the related option.
<END_TASK>
<USER_TASK:>
Description:
def _get_options_dic(self, options: List[str]) -> Dict[str, str]:
"""
Convert the option list to a dictionary where the key is the option and the value is the related option.
Is called in the init.
:param options: options given to the plugin.
:type options: List[str]
:return: dictionary which contains the option key as str related to the option string
:rtype Dict[str, str]
""" |
options_dic = {}
for option in options:
cur_option = option.split("=")
if len(cur_option) != 2:
self.log.warning(f"'{option}' is not valid and will be ignored.")
options_dic[cur_option[0]] = cur_option[1]
return options_dic |
<SYSTEM_TASK:>
Find the difference between apparent and mean solar time
<END_TASK>
<USER_TASK:>
Description:
def _equation_of_time(t):
    """
    Find the difference between apparent and mean solar time

    Parameters
    ----------
    t : `~astropy.time.Time`
        times (array)

    Returns
    ----------
    ret1 : `~astropy.units.Quantity`
        the equation of time
    """
    # Julian centuries since J2000.0
    T = (t - Time("J2000")).to(u.year).value / 100
    # obliquity of ecliptic (Meeus 1998, eq 22.2)
    # NOTE(review): polyval is called as polyval(x, coeffs) throughout,
    # with ascending-order coefficients -- presumably a local helper,
    # not numpy.polyval (whose argument order is reversed); confirm.
    poly_pars = (84381.448, 46.8150, 0.00059, 0.001813)
    eps = u.Quantity(polyval(T, poly_pars), u.arcsec)
    y = np.tan(eps/2)**2
    # Sun's mean longitude (Meeus 1998, eq 25.2)
    poly_pars = (280.46646, 36000.76983, 0.0003032)
    L0 = u.Quantity(polyval(T, poly_pars), u.deg)
    # Sun's mean anomaly (Meeus 1998, eq 25.3)
    poly_pars = (357.52911, 35999.05029, 0.0001537)
    M = u.Quantity(polyval(T, poly_pars), u.deg)
    # eccentricity of Earth's orbit (Meeus 1998, eq 25.4)
    poly_pars = (0.016708634, -0.000042037, -0.0000001267)
    e = polyval(T, poly_pars)
    # equation of time, radians (Meeus 1998, eq 28.3)
    eot = (y * np.sin(2*L0) - 2*e*np.sin(M) + 4*e*y*np.sin(M)*np.cos(2*L0) -
           0.5*y**2 * np.sin(4*L0) - 5*e**2 * np.sin(2*M)/4) * u.rad
    return eot.to(u.hourangle)
<SYSTEM_TASK:>
Convert a Local Sidereal Time to an astropy Time object.
<END_TASK>
<USER_TASK:>
Description:
def _astropy_time_from_LST(t, LST, location, prev_next):
    """
    Convert a Local Sidereal Time to an astropy Time object.

    The local time is related to the LST through the RA of the Sun.
    This routine uses this relationship to convert a LST to an astropy
    time object.

    Parameters
    ----------
    t : `~astropy.time.Time`
        reference time fixing the day of interest
    LST : hourangle-like `~astropy.units.Quantity`
        local sidereal time to convert
    location : site with a ``longitude`` attribute
    prev_next : str
        'next' to return the occurrence after ``t``, otherwise the one
        before

    Returns
    -------
    ret1 : `~astropy.time.Time`
        time corresponding to LST
    """
    # now we need to figure out time to return from LST
    raSun = coord.get_sun(t).ra
    # calculate Greenwich Apparent Solar Time, which we will use as ~UTC for now
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        # ignore astropy deprecation warnings
        lon = location.longitude
    solarTime = LST - raSun + 12*u.hourangle - lon
    # assume this is on the same day as supplied time, and fix later
    first_guess = Time(
        u.d*int(t.mjd) + u.hour*solarTime.wrap_at('360d').hour,
        format='mjd'
    )
    # Equation of time is difference between GAST and UTC
    eot = _equation_of_time(first_guess)
    first_guess = first_guess - u.hour * eot.value
    if prev_next == 'next':
        # if 'next', we want time to be greater than given time
        mask = first_guess < t
        rise_set_time = first_guess + mask * u.sday
    else:
        # if 'previous', we want time to be less than given time
        mask = first_guess > t
        rise_set_time = first_guess - mask * u.sday
    return rise_set_time
<SYSTEM_TASK:>
Do linear interpolation between two ``altitudes`` at
<END_TASK>
<USER_TASK:>
Description:
def _two_point_interp(times, altitudes, horizon=0*u.deg):
    """
    Do linear interpolation between two ``altitudes`` at
    two ``times`` to determine the time where the altitude
    goes through zero.

    Parameters
    ----------
    times : `~astropy.time.Time`
        Two times for linear interpolation between
    altitudes : array of `~astropy.units.Quantity`
        Two altitudes for linear interpolation between
    horizon : `~astropy.units.Quantity`
        Solve for the time when the altitude is equal to
        reference_alt.

    Returns
    -------
    t : `~astropy.time.Time`
        Time when target crosses the horizon
    """
    # if no valid Time pair was supplied, fall back to the module's
    # sentinel "no crossing" value
    if not isinstance(times, Time):
        return MAGIC_TIME
    else:
        # linear altitude(t) model through the two samples, solved for
        # altitude == horizon
        slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd)
        return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value,
                    format='jd')
<SYSTEM_TASK:>
Initialize the main directories.
<END_TASK>
<USER_TASK:>
Description:
def init_dirs(main_dir: Path, logfilepath: Path):
"""
Initialize the main directories.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfilepath: log file
:type logfilepath: ~pathlib.Path
""" |
global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH
MAIN_DIR = main_dir
TEMP_DIR = MAIN_DIR.joinpath(Path('temp/'))
DOWNLOAD_DIR = MAIN_DIR.joinpath(Path('downloads/'))
SAVESTAT_DIR = MAIN_DIR.joinpath(Path('savestates/'))
LOGFILE_PATH = MAIN_DIR.joinpath(logfilepath) |
<SYSTEM_TASK:>
Check the directories if they exist.
<END_TASK>
<USER_TASK:>
Description:
def check_dirs():
    """
    Check the directories if they exist.

    :raises FileExistsError: if a file exists but is not a directory
    """
    for directory in (MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR):
        # a pre-existing regular file would shadow the required directory
        if directory.exists() and not directory.is_dir():
            raise FileExistsError(str(directory.resolve()) + " cannot be used as a directory.")
<SYSTEM_TASK:>
Parse a single item from the telescope server into name, value, comment.
<END_TASK>
<USER_TASK:>
Description:
def parse_hstring(hs):
    """
    Parse a single item from the telescope server into name, value, comment.

    Parameters
    ----------
    hs : str
        one FITS-style header item, e.g. "NAME = VALUE / COMMENT"

    Returns
    -------
    (name, value, comment) : tuple
    """
    # split the string on = and /, also stripping whitespace and annoying quotes
    name, value, comment = yield_three(
        [val.strip().strip("'") for val in filter(None, re.split("[=/]+", hs))]
    )
    # if the comment itself contained a slash it was split into several
    # pieces above; re-join them. A len()-less comment (presumably None
    # from yield_three -- confirm) raises TypeError: nothing to re-join.
    # BUG FIX: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to TypeError.
    try:
        len(comment)
    except TypeError:
        pass
    else:
        comment = '/'.join(comment)
    return name, value, comment
<SYSTEM_TASK:>
Create a list of fits header items from GTC telescope pars.
<END_TASK>
<USER_TASK:>
Description:
def create_header_from_telpars(telpars):
    """
    Create a list of fits header items from GTC telescope pars.

    The GTC telescope server gives a list of string describing
    FITS header items such as RA, DEC, etc.

    Arguments
    ---------
    telpars : list
        list returned by server call to getTelescopeParams

    Returns
    -------
    hdr : `~astropy.io.fits.Header`
        header assembled from the parsed items
    """
    # pars is a list of strings describing tel info in FITS
    # style, each entry in the list is a different class of
    # thing (weather, telescope, instrument etc).
    # first, we munge it into a single list of strings, each one
    # describing a single item whilst also stripping whitespace
    pars = [val.strip() for val in (';').join(telpars).split(';')
            if val.strip() != '']
    # apply parse_hstring to everything in pars
    with warnings.catch_warnings():
        # suppress complaints about non-standard keywords/values
        warnings.simplefilter('ignore', fits.verify.VerifyWarning)
        hdr = fits.Header(map(parse_hstring, pars))
    return hdr
<SYSTEM_TASK:>
Add a row with current values to GTC table
<END_TASK>
<USER_TASK:>
Description:
def add_gtc_header_table_row(t, telpars):
    """
    Add a row with current values to GTC table

    Arguments
    ---------
    t : `~astropy.table.Table`
        The table to append row to
    telpars : list
        list returned by server call to getTelescopeParams
    """
    now = Time.now().mjd
    hdr = create_header_from_telpars(telpars)
    # make dictionary of vals to put in table, keeping only the keys the
    # table tracks (VARIABLE_GTC_KEYS)
    vals = {k: v for k, v in hdr.items() if k in VARIABLE_GTC_KEYS}
    # timestamp the row with the current MJD
    vals['MJD'] = now
    # store LST as hourangle
    vals['LST'] = Longitude(vals['LST'], unit=u.hour).hourangle
    t.add_row(vals)
<SYSTEM_TASK:>
Returns a string with the article + the word.
<END_TASK>
<USER_TASK:>
Description:
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
    """ Returns a string with the article + the word.

    The article is inflected by the `_article` helper for the given
    article type (definite/indefinite), gender and grammatical role.
    """
    return "%s %s" % (_article(word, article, gender, role), word)
<SYSTEM_TASK:>
Returns the singular of a given word.
<END_TASK>
<USER_TASK:>
Description:
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the singular of a given word.
        The inflection is based on probability rather than gender and role.
    """
    w = word.lower().capitalize()
    # NOTE(review): the custom/singular lookups use the raw `word` while
    # the suffix rules below use the capitalized `w` -- presumably the
    # dictionaries are keyed on surface forms; confirm.
    if word in custom:
        return custom[word]
    if word in singular:
        return singular[word]
    if pos == NOUN:
        # exact suffix replacements take precedence
        for a, b in singular_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rule: strip known plural suffixes (baseline = 51%).
        for suffix in ("nen", "en", "n", "e", "er", "s"):
            if w.endswith(suffix):
                w = w[:-len(suffix)]
                break
        # Corrections (these add about 1% accuracy):
        if w.endswith(("rr", "rv", "nz")):
            return w + "e"
        return w
    # non-noun POS: return the normalized word unchanged
    return w
<SYSTEM_TASK:>
Return a set of all words in a dataset.
<END_TASK>
<USER_TASK:>
Description:
def _get_words_from_dataset(dataset):
    """Return a set of all words in a dataset.

    :param dataset: A list of tuples of the form ``(words, label)`` where
        ``words`` is either a string of a list of tokens.
    """
    # Words may be either a string or a list of tokens. Return an iterator
    # of tokens accordingly
    # NOTE(review): `basestring` is Python 2 -- presumably supplied by a
    # py2/3 compatibility shim at module level; confirm.
    def tokenize(words):
        if isinstance(words, basestring):
            return word_tokenize(words, include_punc=False)
        else:
            return words
    all_words = chain.from_iterable(tokenize(words) for words, _ in dataset)
    return set(all_words)
<SYSTEM_TASK:>
A basic document feature extractor that returns a dict indicating what
<END_TASK>
<USER_TASK:>
Description:
def basic_extractor(document, train_set):
    """A basic document feature extractor that returns a dict indicating what
    words in ``train_set`` are contained in ``document``.

    :param document: The text to extract features from. Can be a string or an iterable.
    :param list train_set: Training data set, a list of tuples of the form
        ``(words, label)``.
    """
    vocabulary = _get_words_from_dataset(train_set)
    document_tokens = _get_document_tokens(document)
    # one boolean "contains(word)" feature per training-set word
    return {
        u'contains({0})'.format(word): (word in document_tokens)
        for word in vocabulary
    }
<SYSTEM_TASK:>
A basic document feature extractor that returns a dict of words that the
<END_TASK>
<USER_TASK:>
Description:
def contains_extractor(document):
    """A basic document feature extractor that returns a dict of words that the
    document contains."""
    # one always-True "contains(token)" feature per token in the document
    return {u'contains({0})'.format(token): True
            for token in _get_document_tokens(document)}
<SYSTEM_TASK:>
Reads a data file and returns and iterable that can be used as
<END_TASK>
<USER_TASK:>
Description:
def _read_data(self, dataset, format=None):
    """Reads a data file and returns and iterable that can be used as
    testing or training data.

    :param dataset: filename of the data file.
    :param format: optional explicit file format key; must be one of
        ``formats.AVAILABLE`` when given.
    :raises ValueError: if an unsupported format is requested.
    """
    # Attempt to detect file format if "format" isn't specified
    if not format:
        format_class = formats.detect(dataset)
    else:
        if format not in formats.AVAILABLE.keys():
            raise ValueError("'{0}' format not supported.".format(format))
        format_class = formats.AVAILABLE[format]
    return format_class(dataset).to_iterable()
<SYSTEM_TASK:>
Extracts features from a body of text.
<END_TASK>
<USER_TASK:>
Description:
def extract_features(self, text):
    """Extracts features from a body of text.

    :rtype: dictionary of features
    """
    # A feature extractor may accept (text, train_set) or just (text);
    # fall back to the single-argument form if the two-argument call fails.
    try:
        features = self.feature_extractor(text, self.train_set)
    except (TypeError, AttributeError):
        features = self.feature_extractor(text)
    return features
<SYSTEM_TASK:>
Train the classifier with a labeled feature set and return the
<END_TASK>
<USER_TASK:>
Description:
def train(self, *args, **kwargs):
    """Train the classifier with a labeled feature set and return the
    classifier. Takes the same arguments as the wrapped NLTK class. This
    method is implicitly called when calling ``classify`` or ``accuracy``
    methods and is included only to allow passing in arguments to the
    ``train`` method of the wrapped NLTK class.

    .. versionadded:: 0.6.2

    :rtype: A classifier
    """
    try:
        self.classifier = self.nltk_class.train(self.train_features,
                                                *args, **kwargs)
    except AttributeError:
        # nltk_class is still None (or otherwise unusable) on this instance
        raise ValueError("NLTKClassifier must have a nltk_class"
                         " variable that is not None.")
    return self.classifier
<SYSTEM_TASK:>
Compute the accuracy on a test set.
<END_TASK>
<USER_TASK:>
Description:
def accuracy(self, test_set, format=None):
    """Compute the accuracy on a test set.

    :param test_set: A list of tuples of the form ``(text, label)``, or a
        filename.
    :param format: If ``test_set`` is a filename, the file format, e.g.
        ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the
        file format.
    """
    if isinstance(test_set, basestring):  # test_set is a filename
        # BUG FIX: the explicit ``format`` argument was previously not
        # forwarded to _read_data, so it was silently ignored despite
        # being documented; pass it through so callers can override
        # auto-detection.
        test_data = self._read_data(test_set, format)
    else:  # test_set is a list of tuples
        test_data = test_set
    test_features = [(self.extract_features(d), c) for d, c in test_data]
    return nltk.classify.accuracy(self.classifier, test_features)
<SYSTEM_TASK:>
Train the classifier with a labeled and unlabeled feature sets and
<END_TASK>
<USER_TASK:>
Description:
def train(self, *args, **kwargs):
    """Train the classifier with a labeled and unlabeled feature sets and
    return the classifier. Takes the same arguments as the wrapped NLTK
    class. This method is implicitly called when calling ``classify`` or
    ``accuracy`` methods and is included only to allow passing in arguments
    to the ``train`` method of the wrapped NLTK class.

    :rtype: A classifier
    """
    # NOTE(review): *args/**kwargs are accepted for interface
    # compatibility but not forwarded; training always uses the stored
    # positive/unlabeled feature sets and prior.
    self.classifier = self.nltk_class.train(self.positive_features,
                                            self.unlabeled_features,
                                            self.positive_prob_prior)
    return self.classifier
<SYSTEM_TASK:>
Returns all possible variations of a sequence with optional items.
<END_TASK>
<USER_TASK:>
Description:
def variations(iterable, optional=lambda x: False):
    """ Returns all possible variations of a sequence with optional items.
    """
    # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?"))
    # defines a sequence where constraint A and B are optional:
    # [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")]
    iterable = tuple(iterable)
    # Create a boolean sequence where True means optional:
    # ("A?", "B?", "C") => [True, True, False]
    o = [optional(x) for x in iterable]
    # Find all permutations of the boolean sequence:
    # [True, False, True], [True, False, False], [False, False, True], [False, False, False].
    # Map to sequences of constraints whose index in the boolean sequence yields True.
    a = set()
    for p in product([False, True], repeat=sum(o)):
        p = list(p)
        # consume one drop/keep flag per optional position; v[i] is True
        # when position i is optional AND flagged for removal
        v = [b and (b and p.pop(0)) for b in o]
        v = tuple(iterable[i] for i in xrange(len(v)) if not v[i])
        a.add(v)
    # Longest-first.
    # NOTE: xrange and sorted(cmp=...) are Python 2 only constructs.
    return sorted(a, cmp=lambda x, y: len(y) - len(x))
<SYSTEM_TASK:>
Returns a list of all semantic types for the given term.
<END_TASK>
<USER_TASK:>
Description:
def parents(self, term, recursive=False, **kwargs):
    """ Returns a list of all semantic types for the given term.
        If recursive=True, traverses parents up to the root.
    """
    def dfs(term, recursive=False, visited={}, **kwargs):
        if term in visited:  # Break on cyclic relations.
            return []
        visited[term], a = True, []
        if dict.__contains__(self, term):
            # NOTE: relies on Python 2 keys() returning a list (extend
            # below would fail on a Python 3 view).
            a = self[term][0].keys()
        for classifier in self.classifiers:
            a.extend(classifier.parents(term, **kwargs) or [])
        if recursive:
            # walk upwards; `visited` guards against cycles
            for w in a: a += dfs(w, recursive, visited, **kwargs)
        return a
    return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
<SYSTEM_TASK:>
Returns the constraint that matches the given Word, or None.
<END_TASK>
<USER_TASK:>
Description:
def constraint(self, word):
    """ Returns the constraint that matches the given Word, or None.
    """
    # _map1 maps word indices to the constraint that matched them
    return self._map1.get(word.index)
<SYSTEM_TASK:>
Returns a list of constraints that match the given Chunk.
<END_TASK>
<USER_TASK:>
Description:
def constraints(self, chunk):
    """ Returns a list of constraints that match the given Chunk.
    """
    matched = [self._map1[w.index] for w in chunk.words if w.index in self._map1]
    # de-duplicate while preserving first-seen order
    unique_constraints = []
    for candidate in matched:
        if candidate not in unique_constraints:
            unique_constraints.append(candidate)
    return unique_constraints
<SYSTEM_TASK:>
Returns a parsed Text from the given parsed string.
<END_TASK>
<USER_TASK:>
Description:
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]):
    """ Returns a parsed Text from the given parsed string.

    :param s: tagged/parsed string (as produced by parse()).
    :param token: order of the tag fields attached to each token.
    """
    # NOTE(review): the mutable default list is shared across calls;
    # presumably Text does not mutate it -- confirm.
    return Text(s, token)
<SYSTEM_TASK:>
Convenience function for tokenizing text into words.
<END_TASK>
<USER_TASK:>
Description:
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs):
    """Convenience function for tokenizing text into words.

    NOTE: NLTK's word tokenizer expects sentences as input, so the text will be
    tokenized to sentences before being tokenized to words.

    This function returns an itertools chain object (generator).

    :param tokenizer: optional tokenizer instance; defaults to a fresh
        NLTKPunktTokenizer.
    :param include_punc: whether punctuation tokens are kept.
    """
    _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer()
    # sentence-split first, then word-tokenize each sentence lazily
    words = chain.from_iterable(
        WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc,
                                                      *args, **kwargs)
        for sentence in sent_tokenize(text, tokenizer=_tokenizer))
    return words
<SYSTEM_TASK:>
The Treebank tokenizer uses regular expressions to tokenize text as
<END_TASK>
<USER_TASK:>
Description:
def word_tokenize(self, text, include_punc=True):
    """The Treebank tokenizer uses regular expressions to tokenize text as
    in Penn Treebank.

    It assumes that the text has already been segmented into sentences,
    e.g. using ``self.sent_tokenize()``.

    This tokenizer performs the following steps:

    - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll``
    - treat most punctuation characters as separate tokens
    - split off commas and single quotes, when followed by whitespace
    - separate periods that appear at the end of line

    Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014)

    :param include_punc: if False, punctuation-only tokens are dropped and
        trailing punctuation is stripped from words (contractions kept).
    :return: list of token strings.
    """
    #: Do not process empty strings (Issue #3)
    if text.strip() == "":
        return []
    _tokens = self.word_tok.tokenize(text)
    #: Handle strings consisting of a single punctuation mark seperately (Issue #4)
    if len(_tokens) == 1:
        if _tokens[0] in PUNCTUATION:
            if include_punc:
                return _tokens
            else:
                return []
    if include_punc:
        return _tokens
    else:
        # Return each word token
        # Strips punctuation unless the word comes from a contraction
        # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!"
        # e.g. "hat's" => ["hat", "'s"]
        # e.g. "home." => ['home']
        words = [
            word if word.startswith("'") else strip_punc(
                word,
                all=False) for word in _tokens if strip_punc(
                word,
                all=False)]
        return list(words)
<SYSTEM_TASK:>
Returns a list of sentences.
<END_TASK>
<USER_TASK:>
Description:
def sent_tokenize(self, text, **kwargs):
    """Returns a list of sentences.

    Each sentence is a space-separated string of tokens (words).
    Handles common cases of abbreviations (e.g., etc., ...).
    Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
    Headings without an ending period are inferred by line breaks.

    Keyword overrides: ``punctuation``, ``abbreviations``, ``replace``.
    """
    # delegate to the pattern-style sentence finder; double newlines
    # ("\n{2,}") are treated as sentence breaks for headings
    sentences = find_sentences(text,
                               punctuation=kwargs.get(
                                   "punctuation",
                                   PUNCTUATION),
                               abbreviations=kwargs.get(
                                   "abbreviations",
                                   ABBREVIATIONS_DE),
                               replace=kwargs.get("replace", replacements),
                               linebreak=r"\n{2,}")
    return sentences
<SYSTEM_TASK:>
Parses the text.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, text):
    """Parses the text.

    ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and
    are documented in the main docstring of
    :class:`PatternParser() <textblob_de.parsers.PatternParser>`.

    :param str text: A string.
    :return: parsed/tagged string (pretty-printed if ``self.pprint``).
    """
    #: Do not process empty strings (Issue #3)
    if text.strip() == "":
        return ""
    #: Do not process strings consisting of a single punctuation mark (Issue #4)
    elif text.strip() in PUNCTUATION:
        _sym = text.strip()
        # sentence-final marks get the generic '.' tag; everything else
        # is tagged as itself
        if _sym in tuple('.?!'):
            _tag = "."
        else:
            _tag = _sym
        if self.lemmata:
            return "{0}/{1}/O/O/{0}".format(_sym, _tag)
        else:
            return "{0}/{1}/O/O".format(_sym, _tag)
    if self.tokenize:
        _tokenized = " ".join(self.tokenizer.tokenize(text))
    else:
        _tokenized = text
    _parsed = pattern_parse(_tokenized,
                            # text is tokenized before it is passed on to
                            # pattern.de.parse
                            tokenize=False,
                            tags=self.tags, chunks=self.chunks,
                            relations=self.relations, lemmata=self.lemmata,
                            encoding=self.encoding, tagset=self.tagset)
    if self.pprint:
        _parsed = pattern_pprint(_parsed)
    return _parsed
<SYSTEM_TASK:>
Filter insignificant words for key noun phrase extraction.
<END_TASK>
<USER_TASK:>
Description:
def _filter_extracted(self, extracted_list):
    """Filter insignificant words for key noun phrase extraction.

    determiners, relative pronouns, reflexive pronouns

    In general, pronouns are not useful, as you need context to know what they refer to.
    Most of the pronouns, however, are filtered out by blob.noun_phrase method's
    np length (>1) filter

    :param list extracted_list: A list of noun phrases extracted from parser output.
    :return: list of phrases with leading/trailing insignificant words removed.
    """
    _filtered = []
    for np in extracted_list:
        _np = np.split()
        # strip a leading insignificant word
        if _np[0] in INSIGNIFICANT:
            _np.pop(0)
        try:
            # strip a trailing insignificant word, then re-check the head
            if _np[-1] in INSIGNIFICANT:
                _np.pop(-1)
            # e.g. 'welcher die ...'
            if _np[0] in INSIGNIFICANT:
                _np.pop(0)
        except IndexError:
            # everything was stripped; drop the phrase
            _np = []
        if len(_np) > 0:
            _filtered.append(" ".join(_np))
    return _filtered
<SYSTEM_TASK:>
Tag a string `sentence`.
<END_TASK>
<USER_TASK:>
Description:
def tag(self, sentence, tokenize=True):
    """Tag a string `sentence`.

    :param str or list sentence: A string or a list of sentence strings.
    :param tokenize: (optional) If ``False`` string has to be tokenized before
        (space separated string).
    :return: list of (word, tag) tuples.
    """
    #: Do not process empty strings (Issue #3)
    if sentence.strip() == "":
        return []
    #: Do not process strings consisting of a single punctuation mark (Issue #4)
    elif sentence.strip() in PUNCTUATION:
        if self.include_punc:
            _sym = sentence.strip()
            # sentence-final marks get the generic '.' tag
            if _sym in tuple('.?!'):
                _tag = "."
            else:
                _tag = _sym
            return [(_sym, _tag)]
        else:
            return []
    if tokenize:
        _tokenized = " ".join(self.tokenizer.tokenize(sentence))
        sentence = _tokenized
    # Sentence is tokenized before it is passed on to pattern.de.tag
    # (i.e. it is either submitted tokenized or if )
    _tagged = pattern_tag(sentence, tokenize=False,
                          encoding=self.encoding,
                          tagset=self.tagset)
    if self.include_punc:
        return _tagged
    else:
        # drop tokens whose tag is pure punctuation
        _tagged = [
            (word, t) for word, t in _tagged if not PUNCTUATION_REGEX.match(
                unicode(t))]
        return _tagged
<SYSTEM_TASK:>
Given a command, mode, and a PATH string, return the path which conforms
<END_TASK>
<USER_TASK:>
Description:
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which conforms
to the given mode on the PATH, or None if there is no such file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
""" |
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any([cmd.lower().endswith(ext.lower()) for ext in pathext]):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None |
<SYSTEM_TASK:>
Return the lemma of each word in this WordList.
<END_TASK>
<USER_TASK:>
Description:
def lemmatize(self):
    """Return the lemma of each word in this WordList.

    Currently using NLTKPunktTokenizer() for all lemmatization
    tasks. This might cause slightly different tokenization results
    compared to the TextBlob.words property.
    """
    # WordList object --> plain sentence string; a trailing period
    # improves parser accuracy.
    text = " ".join(self) + "."
    lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
    lemma_tag_pairs = lemmatizer.lemmatize(text)
    return self.__class__([Word(lemma, tag) for lemma, tag in lemma_tag_pairs])
<SYSTEM_TASK:>
Return a list of tokens, using ``tokenizer``.
<END_TASK>
<USER_TASK:>
Description:
def tokenize(self, tokenizer=None):
    """Return a list of tokens, using ``tokenizer``.

    :param tokenizer: (optional) A tokenizer object. If None, defaults to
        this blob's default tokenizer.
    """
    if tokenizer is None:
        tokenizer = self.tokenizer
    return WordList(tokenizer.tokenize(self.raw))
<SYSTEM_TASK:>
Returns a list of noun phrases for this blob.
<END_TASK>
<USER_TASK:>
Description:
def noun_phrases(self):
    """Returns a list of noun phrases for this blob.

    Only multi-word phrases are kept; single tokens are filtered out.
    """
    phrases = self.np_extractor.extract(self.raw)
    multiword = [p.strip() for p in phrases if len(p.split()) > 1]
    return WordList(multiword)
<SYSTEM_TASK:>
Return a list of word tokens. This excludes punctuation characters.
<END_TASK>
<USER_TASK:>
Description:
def words(self):
    """Return a list of word tokens. This excludes punctuation characters.

    If you want to include punctuation characters, access the ``tokens``
    property.

    :returns: A :class:`WordList <WordList>` of word tokens.
    """
    tokens = word_tokenize(self.raw, self.tokenizer, include_punc=False)
    return WordList(tokens)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.