<SYSTEM_TASK:>
Convert a sexagesimal Dec string into a float in degrees.
<END_TASK>
<USER_TASK:>
Description:
def dec2dec(dec):
"""
Convert a sexagesimal Dec string into a float in degrees.
Parameters
----------
dec : string
A sexagesimal string representing the Dec.
Expected format is `[+- ]dd:mm[:ss.s]`
Colons can be replaced with any whitespace character.
Returns
-------
dec : float
The Dec in degrees.
""" |
d = dec.replace(':', ' ').split()
if len(d) == 2:
d.append(0.0)
if d[0].startswith('-') or float(d[0]) < 0:
return float(d[0]) - float(d[1]) / 60.0 - float(d[2]) / 3600.0
return float(d[0]) + float(d[1]) / 60.0 + float(d[2]) / 3600.0 |
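A minimal usage sketch (assuming dec2dec is in scope); the values follow from the arithmetic above:

print(dec2dec('-30:15:30'))  # -30.258333... (the leading sign is applied to all parts)
print(dec2dec('+00 30'))     # 0.5 (missing seconds default to zero)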
<SYSTEM_TASK:>
Convert decimal degrees into a sexagesimal string in degrees.
<END_TASK>
<USER_TASK:>
Description:
def dec2dms(x):
"""
Convert decimal degrees into a sexagesimal string in degrees.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format [+-]DD:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
""" |
if not np.isfinite(x):
return 'XX:XX:XX.XX'
if x < 0:
sign = '-'
else:
sign = '+'
x = abs(x)
d = int(math.floor(x))
m = int(math.floor((x - d) * 60))
s = float(( (x - d) * 60 - m) * 60)
return '{0}{1:02d}:{2:02d}:{3:05.2f}'.format(sign, d, m, s) |
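A short usage sketch (assumes numpy as np and math are imported, as the function requires):

print(dec2dms(-30.2583333))   # '-30:15:30.00'
print(dec2dms(float('nan')))  # 'XX:XX:XX.XX'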
<SYSTEM_TASK:>
Convert decimal degrees into a sexagesimal string in hours.
<END_TASK>
<USER_TASK:>
Description:
def dec2hms(x):
"""
Convert decimal degrees into a sexagesimal string in hours.
Parameters
----------
x : float
Angle in degrees
Returns
-------
hms : string
String of format HH:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
""" |
if not np.isfinite(x):
return 'XX:XX:XX.XX'
# wrap negative RA's
if x < 0:
x += 360
x /= 15.0
h = int(x)
x = (x - h) * 60
m = int(x)
s = (x - m) * 60
return '{0:02d}:{1:02d}:{2:05.2f}'.format(h, m, s) |
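Usage sketch; note how negative RA values are wrapped before the conversion to hours:

print(dec2hms(345.0))  # '23:00:00.00'
print(dec2hms(-15.0))  # '23:00:00.00' (-15 is wrapped to 345 degrees)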
<SYSTEM_TASK:>
Create a masked version of a file, using a region.
<END_TASK>
<USER_TASK:>
Description:
def mask_file(regionfile, infile, outfile, negate=False):
"""
Create a masked version of a file, using a region.
Parameters
----------
regionfile : str
A file which can be loaded as a :class:`AegeanTools.regions.Region`.
The image will be masked according to this region.
infile : str
Input FITS image.
outfile : str
Output FITS image.
negate : bool
If True then pixels *outside* the region are masked.
Default = False.
See Also
--------
:func:`AegeanTools.MIMAS.mask_plane`
""" |
# Check that the input file is accessible and then open it
if not os.path.exists(infile): raise AssertionError("Cannot locate fits file {0}".format(infile))
im = pyfits.open(infile)
if not os.path.exists(regionfile): raise AssertionError("Cannot locate region file {0}".format(regionfile))
region = Region.load(regionfile)
try:
wcs = pywcs.WCS(im[0].header, naxis=2)
except: # TODO: figure out what error is being thrown
wcs = pywcs.WCS(str(im[0].header), naxis=2)
if len(im[0].data.shape) > 2:
data = np.squeeze(im[0].data)
else:
data = im[0].data
print(data.shape)
if len(data.shape) == 3:
for plane in range(data.shape[0]):
mask_plane(data[plane], wcs, region, negate)
else:
mask_plane(data, wcs, region, negate)
im[0].data = data
im.writeto(outfile, overwrite=True)
logging.info("Wrote {0}".format(outfile))
return |
<SYSTEM_TASK:>
Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box
<END_TASK>
<USER_TASK:>
Description:
def box2poly(line):
"""
Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box
Parameters
----------
line : str
A string containing a DS9 region command for a box.
Returns
-------
poly : [ra, dec, ...]
The corners of the box in clockwise order from top left.
""" |
words = re.split(r'[(\s,)]', line)
ra = words[1]
dec = words[2]
width = words[3]
height = words[4]
if ":" in ra:
ra = Angle(ra, unit=u.hour)
else:
ra = Angle(ra, unit=u.degree)
dec = Angle(dec, unit=u.degree)
width = Angle(float(width[:-1])/2, unit=u.arcsecond) # strip the "
height = Angle(float(height[:-1])/2, unit=u.arcsecond) # strip the "
center = SkyCoord(ra, dec)
tl = center.ra.degree+width.degree, center.dec.degree+height.degree
tr = center.ra.degree-width.degree, center.dec.degree+height.degree
bl = center.ra.degree+width.degree, center.dec.degree-height.degree
br = center.ra.degree-width.degree, center.dec.degree-height.degree
return np.ravel([tl, tr, br, bl]).tolist() |
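A usage sketch with a hypothetical DS9 box command (width and height are given in arcseconds, hence the trailing double-quote):

corners = box2poly('box(180,-45,60",30",0)')
# -> eight floats: [ra, dec] of the four corners, clockwise from top left;
#    RA spans 180 deg +/- 30 arcsec, Dec spans -45 deg +/- 15 arcsec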
<SYSTEM_TASK:>
Parse a string that describes a circle in ds9 format.
<END_TASK>
<USER_TASK:>
Description:
def circle2circle(line):
"""
Parse a string that describes a circle in ds9 format.
Parameters
----------
line : str
A string containing a DS9 region command for a circle.
Returns
-------
circle : [ra, dec, radius]
The center and radius of the circle.
""" |
words = re.split(r'[(,\s)]', line)
ra = words[1]
dec = words[2]
radius = words[3][:-1] # strip the "
if ":" in ra:
ra = Angle(ra, unit=u.hour)
else:
ra = Angle(ra, unit=u.degree)
dec = Angle(dec, unit=u.degree)
radius = Angle(radius, unit=u.arcsecond)
return [ra.degree, dec.degree, radius.degree] |
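Usage sketch; both forms below describe the same circle (the radius has its trailing " stripped and is read as arcseconds):

print(circle2circle('circle(180,-45,60")'))
# [180.0, -45.0, 0.016666...]
print(circle2circle('circle(12:00:00,-45:00:00,60")'))
# same result, with the RA given in sexagesimal hours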
<SYSTEM_TASK:>
Parse a string of text containing a DS9 description of a polygon.
<END_TASK>
<USER_TASK:>
Description:
def poly2poly(line):
"""
Parse a string of text containing a DS9 description of a polygon.
This function works but is not very robust due to the constraints of healpy.
Parameters
----------
line : str
A string containing a DS9 region command for a polygon.
Returns
-------
poly : [ra, dec, ...]
The coordinates of the polygon.
""" |
words = re.split(r'[(\s,)]', line)
ras = np.array(words[1::2])
decs = np.array(words[2::2])
coords = []
for ra, dec in zip(ras, decs):
if ra.strip() == '' or dec.strip() == '':
continue
if ":" in ra:
pos = SkyCoord(Angle(ra, unit=u.hour), Angle(dec, unit=u.degree))
else:
pos = SkyCoord(Angle(ra, unit=u.degree), Angle(dec, unit=u.degree))
# only add this point if it is some distance from the previous one
coords.extend([pos.ra.degree, pos.dec.degree])
return coords |
<SYSTEM_TASK:>
Return a region that is the combination of those specified in the container.
<END_TASK>
<USER_TASK:>
Description:
def combine_regions(container):
"""
Return a region that is the combination of those specified in the container.
The container is typically a results instance that comes from argparse.
Order of construction is: add regions, subtract regions, add circles, subtract circles,
add polygons, subtract polygons.
Parameters
----------
container : :class:`AegeanTools.MIMAS.Dummy`
The regions to be combined.
Returns
-------
region : :class:`AegeanTools.regions.Region`
The constructed region.
""" |
# create empty region
region = Region(container.maxdepth)
# add/rem all the regions from files
for r in container.add_region:
logging.info("adding region from {0}".format(r))
r2 = Region.load(r[0])
region.union(r2)
for r in container.rem_region:
logging.info("removing region from {0}".format(r))
r2 = Region.load(r[0])
region.without(r2)
# add circles
if len(container.include_circles) > 0:
for c in container.include_circles:
circles = np.radians(np.array(c))
if container.galactic:
l, b, radii = circles.reshape(3, circles.shape[0]//3)
ras, decs = galactic2fk5(l, b)
else:
ras, decs, radii = circles.reshape(3, circles.shape[0]//3)
region.add_circles(ras, decs, radii)
# remove circles
if len(container.exclude_circles) > 0:
for c in container.exclude_circles:
r2 = Region(container.maxdepth)
circles = np.radians(np.array(c))
if container.galactic:
l, b, radii = circles.reshape(3, circles.shape[0]//3)
ras, decs = galactic2fk5(l, b)
else:
ras, decs, radii = circles.reshape(3, circles.shape[0]//3)
r2.add_circles(ras, decs, radii)
region.without(r2)
# add polygons
if len(container.include_polygons) > 0:
for p in container.include_polygons:
poly = np.radians(np.array(p))
poly = poly.reshape((poly.shape[0]//2, 2))
region.add_poly(poly)
# remove polygons
if len(container.exclude_polygons) > 0:
for p in container.exclude_polygons:
poly = np.array(np.radians(p))
r2 = Region(container.maxdepth)
r2.add_poly(poly)
region.without(r2)
return region |
<SYSTEM_TASK:>
Construct a region which is the intersection of all regions described in the given
<END_TASK>
<USER_TASK:>
Description:
def intersect_regions(flist):
"""
Construct a region which is the intersection of all regions described in the given
list of file names.
Parameters
----------
flist : list
A list of region filenames.
Returns
-------
region : :class:`AegeanTools.regions.Region`
The intersection of all regions, possibly empty.
""" |
if len(flist) < 2:
raise Exception("Require at least two regions to perform intersection")
a = Region.load(flist[0])
for b in [Region.load(f) for f in flist[1:]]:
a.intersect(b)
return a |
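A usage sketch with hypothetical region file names:

common = intersect_regions(['a.mim', 'b.mim', 'c.mim'])
# 'common' covers only the sky area present in all three input regions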
<SYSTEM_TASK:>
Save the given region to a file
<END_TASK>
<USER_TASK:>
Description:
def save_region(region, filename):
"""
Save the given region to a file
Parameters
----------
region : :class:`AegeanTools.regions.Region`
A region.
filename : str
Output file name.
""" |
region.save(filename)
logging.info("Wrote {0}".format(filename))
return |
<SYSTEM_TASK:>
Set the image data.
<END_TASK>
<USER_TASK:>
Description:
def set_pixels(self, pixels):
"""
Set the image data.
Will not work if the new image has a different shape than the current image.
Parameters
----------
pixels : numpy.ndarray
New image data
Returns
-------
None
""" |
if not (pixels.shape == self._pixels.shape):
raise AssertionError("Shape mismatch between pixels supplied {0} and existing image pixels {1}".format(pixels.shape,self._pixels.shape))
self._pixels = pixels
# reset this so that it is calculated next time the function is called
self._rms = None
return |
<SYSTEM_TASK:>
Get the sky coordinates for a given image pixel.
<END_TASK>
<USER_TASK:>
Description:
def pix2sky(self, pixel):
"""
Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees)
""" |
pixbox = numpy.array([pixel, pixel])
skybox = self.wcs.all_pix2world(pixbox, 1)
return [float(skybox[0][0]), float(skybox[0][1])] |
<SYSTEM_TASK:>
Open a file, read contents, return a list of all the sources in that file.
<END_TASK>
<USER_TASK:>
Description:
def load_sources(filename):
"""
Open a file, read contents, return a list of all the sources in that file.
@param filename: name of the file to read; any format supported by :func:`AegeanTools.catalogs.load_table`
@return: list of OutputSource objects
""" |
catalog = catalogs.table_to_source_list(catalogs.load_table(filename))
logging.info("read {0} sources from {1}".format(len(catalog), filename))
return catalog |
<SYSTEM_TASK:>
Convert a telescope name into a latitude
<END_TASK>
<USER_TASK:>
Description:
def scope2lat(telescope):
"""
Convert a telescope name into a latitude
returns None when the telescope is unknown.
Parameters
----------
telescope : str
Acronym (name) of telescope, eg MWA.
Returns
-------
lat : float
The latitude of the telescope.
Notes
-----
These values were taken from wikipedia so have varying precision/accuracy
""" |
scopes = {'MWA': -26.703319,
"ATCA": -30.3128,
"VLA": 34.0790,
"LOFAR": 52.9088,
"KAT7": -30.721,
"MEERKAT": -30.721,
"PAPER": -30.7224,
"GMRT": 19.096516666667,
"OOTY": 11.383404,
"ASKAP": -26.7,
"MOST": -35.3707,
"PARKES": -32.999944,
"WSRT": 52.914722,
"AMILA": 52.16977,
"AMISA": 52.164303,
"ATA": 40.817,
"CHIME": 49.321,
"CARMA": 37.28044,
"DRAO": 49.321,
"GBT": 38.433056,
"LWA": 34.07,
"ALMA": -23.019283,
"FAST": 25.6525
}
if telescope.upper() in scopes:
return scopes[telescope.upper()]
else:
log = logging.getLogger("Aegean")
log.warn("Telescope {0} is unknown".format(telescope))
log.warn("integrated fluxes may be incorrect")
return None |
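Usage sketch; the lookup is case-insensitive and unknown names return None:

print(scope2lat('mwa'))  # -26.703319
print(scope2lat('SKA'))  # None (with a warning logged)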
<SYSTEM_TASK:>
Determine how many cores we are able to use.
<END_TASK>
<USER_TASK:>
Description:
def check_cores(cores):
"""
Determine how many cores we are able to use.
Return 1 if we are not able to make a queue via pprocess.
Parameters
----------
cores : int
The number of cores that are requested.
Returns
-------
cores : int
The number of cores available.
""" |
cores = min(multiprocessing.cpu_count(), cores)
if six.PY3:
log = logging.getLogger("Aegean")
log.info("Multi-cores not supported in python 3+, using one core")
return 1
try:
queue = pprocess.Queue(limit=cores, reuse=1)
except: # TODO: figure out what error is being thrown
cores = 1
else:
try:
_ = queue.manage(pprocess.MakeReusable(fix_shape))
except:
cores = 1
return cores |
<SYSTEM_TASK:>
Generator function.
<END_TASK>
<USER_TASK:>
Description:
def _gen_flood_wrap(self, data, rmsimg, innerclip, outerclip=None, domask=False):
"""
Generator function.
Segment an image into islands and return one island at a time.
Needs to work for entire image, and also for components within an island.
Parameters
----------
data : 2d-array
Image array.
rmsimg : 2d-array
Noise image.
innerclip, outerclip : float
Seed (inner) and flood (outer) clipping values.
domask : bool
If True then look for a region mask in globals, only return islands that are within the region.
Default = False.
Yields
------
data_box : 2d-array
An island of sources with sub-threshold values masked.
xmin, xmax, ymin, ymax : int
The corners of the data_box within the initial data array.
""" |
if outerclip is None:
outerclip = innerclip
# compute SNR image (data has already been background subtracted)
snr = abs(data) / rmsimg
# mask of pixels that are above the outerclip
a = snr >= outerclip
# segmentation a la scipy
l, n = label(a)
f = find_objects(l)
if n == 0:
self.log.debug("There are no pixels above the clipping limit")
return
self.log.debug("{1} Found {0} islands total above flood limit".format(n, data.shape))
# Yield values as before, though they are not sorted by flux
for i in range(n):
xmin, xmax = f[i][0].start, f[i][0].stop
ymin, ymax = f[i][1].start, f[i][1].stop
if np.any(snr[xmin:xmax, ymin:ymax] > innerclip): # obey inner clip constraint
# self.log.info("{1} Island {0} is above the inner clip limit".format(i, data.shape))
data_box = copy.copy(data[xmin:xmax, ymin:ymax]) # copy so that we don't blank the master data
data_box[np.where(
snr[xmin:xmax, ymin:ymax] < outerclip)] = np.nan # blank pixels that are outside the outerclip
data_box[np.where(l[xmin:xmax, ymin:ymax] != i + 1)] = np.nan # blank out other summits
# check if there are any pixels left unmasked
if not np.any(np.isfinite(data_box)):
# self.log.info("{1} Island {0} has no non-masked pixels".format(i,data.shape))
continue
if domask and (self.global_data.region is not None):
y, x = np.where(snr[xmin:xmax, ymin:ymax] >= outerclip)
# convert indices of this sub region to indices in the greater image
yx = list(zip(y + ymin, x + xmin))
ra, dec = self.global_data.wcshelper.wcs.wcs_pix2world(yx, 1).transpose()
mask = self.global_data.region.sky_within(ra, dec, degin=True)
# if there are no un-masked pixels within the region then we skip this island.
if not np.any(mask):
continue
self.log.debug("Mask {0}".format(mask))
# self.log.info("{1} Island {0} will be fit".format(i, data.shape))
yield data_box, xmin, xmax, ymin, ymax |
<SYSTEM_TASK:>
Generate and save the background and RMS maps as FITS files.
<END_TASK>
<USER_TASK:>
Description:
def save_background_files(self, image_filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, rms=None, bkg=None, cores=1,
outbase=None):
"""
Generate and save the background and RMS maps as FITS files.
They are saved in the current directory as `<outbase>_bkg.fits` and `<outbase>_rms.fits`.
Parameters
----------
image_filename : str or HDUList
Input image.
hdu_index : int
If fits file has more than one hdu, it can be specified here.
Default = 0.
bkgin, rmsin : str or HDUList
Background and noise image filename or HDUList
beam : :class:`AegeanTools.fits_image.Beam`
Beam object representing the synthesized beam. Will replace what is in the FITS header.
rms, bkg : float
A float that represents a constant rms/bkg level for the entire image.
Default = None, which causes the rms/bkg to be loaded or calculated.
cores : int
Number of cores to use if different from what is autodetected.
outbase : str
Basename for output files.
""" |
self.log.info("Saving background / RMS maps")
# load image, and load/create background/rms images
self.load_globals(image_filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, verb=True, rms=rms, bkg=bkg,
cores=cores, do_curve=True)
img = self.global_data.img
bkgimg, rmsimg = self.global_data.bkgimg, self.global_data.rmsimg
curve = np.array(self.global_data.dcurve, dtype=bkgimg.dtype)
# mask these arrays so that they have the same mask as the data
mask = np.where(np.isnan(self.global_data.data_pix))
bkgimg[mask] = np.NaN
rmsimg[mask] = np.NaN
curve[mask] = np.NaN
# Generate the new FITS files by copying the existing HDU and assigning new data.
# This gives the new files the same WCS projection and other header fields.
new_hdu = img.hdu
# Set the ORIGIN to indicate Aegean made this file
new_hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__)
for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']:
if c in new_hdu.header:
del new_hdu.header[c]
if outbase is None:
outbase, _ = os.path.splitext(os.path.basename(image_filename))
noise_out = outbase + '_rms.fits'
background_out = outbase + '_bkg.fits'
curve_out = outbase + '_crv.fits'
snr_out = outbase + '_snr.fits'
new_hdu.data = bkgimg
new_hdu.writeto(background_out, overwrite=True)
self.log.info("Wrote {0}".format(background_out))
new_hdu.data = rmsimg
new_hdu.writeto(noise_out, overwrite=True)
self.log.info("Wrote {0}".format(noise_out))
new_hdu.data = curve
new_hdu.writeto(curve_out, overwrite=True)
self.log.info("Wrote {0}".format(curve_out))
new_hdu.data = self.global_data.data_pix / rmsimg
new_hdu.writeto(snr_out, overwrite=True)
self.log.info("Wrote {0}".format(snr_out))
return |
<SYSTEM_TASK:>
Save the image data.
<END_TASK>
<USER_TASK:>
Description:
def save_image(self, outname):
"""
Save the image data.
This is probably only useful if the image data has been blanked.
Parameters
----------
outname : str
Name for the output file.
""" |
hdu = self.global_data.img.hdu
hdu.data = self.global_data.img._pixels
hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__)
# delete some axes that we aren't going to need
for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']:
if c in hdu.header:
del hdu.header[c]
hdu.writeto(outname, overwrite=True)
self.log.info("Wrote {0}".format(outname))
return |
<SYSTEM_TASK:>
Execute fitting on a list of islands
<END_TASK>
<USER_TASK:>
Description:
def _fit_islands(self, islands):
"""
Execute fitting on a list of islands
This function just wraps around fit_island, so that when we do multiprocesing
a single process will fit multiple islands before returning results.
Parameters
----------
islands : list of :class:`AegeanTools.models.IslandFittingData`
The islands to be fit.
Returns
-------
sources : list
The sources that were fit.
""" |
self.log.debug("Fitting group of {0} islands".format(len(islands)))
sources = []
for island in islands:
res = self._fit_island(island)
sources.extend(res)
return sources |
<SYSTEM_TASK:>
Determine whether a list of files are of a recognizable output type.
<END_TASK>
<USER_TASK:>
Description:
def check_table_formats(files):
"""
Determine whether a list of files are of a recognizable output type.
Parameters
----------
files : str
A list of file names
Returns
-------
result : bool
True if *all* the file names are supported
""" |
cont = True
formats = get_table_formats()
for t in files.split(','):
_, ext = os.path.splitext(t)
ext = ext[1:].lower()
if ext not in formats:
cont = False
log.warn("Format not supported for {0} ({1})".format(t, ext))
if not cont:
log.error("Invalid table format specified.")
return cont |
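Usage sketch with hypothetical file names; note that the argument is a single comma-separated string, not a list:

ok = check_table_formats('out.csv,out.fits')
# True only if every extension is a supported table format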
<SYSTEM_TASK:>
Print a list of all the file formats that are supported for writing.
<END_TASK>
<USER_TASK:>
Description:
def show_formats():
"""
Print a list of all the file formats that are supported for writing.
The file formats are determined by their extensions.
Returns
-------
None
""" |
fmts = {
"ann": "Kvis annotation",
"reg": "DS9 regions file",
"fits": "FITS Binary Table",
"csv": "Comma separated values",
"tab": "tabe separated values",
"tex": "LaTeX table format",
"html": "HTML table",
"vot": "VO-Table",
"xml": "VO-Table",
"db": "Sqlite3 database",
"sqlite": "Sqlite3 database"}
supported = get_table_formats()
print("Extension | Description | Supported?")
for k in sorted(fmts.keys()):
print("{0:10s} {1:24s} {2}".format(k, fmts[k], k in supported))
return |
<SYSTEM_TASK:>
Load a table from a given file.
<END_TASK>
<USER_TASK:>
Description:
def load_table(filename):
"""
Load a table from a given file.
Supports csv, tab, tex, vo, vot, xml, fits, and hdf5.
Parameters
----------
filename : str
File to read
Returns
-------
table : Table
Table of data.
""" |
supported = get_table_formats()
fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.'
if fmt in ['csv', 'tab', 'tex'] and fmt in supported:
log.info("Reading file {0}".format(filename))
t = ascii.read(filename)
elif fmt in ['vo', 'vot', 'xml', 'fits', 'hdf5'] and fmt in supported:
log.info("Reading file {0}".format(filename))
t = Table.read(filename)
else:
log.error("Table format not recognized or supported")
log.error("{0} [{1}]".format(filename, fmt))
raise Exception("Table format not recognized or supported")
return t |
<SYSTEM_TASK:>
Write a table to a file.
<END_TASK>
<USER_TASK:>
Description:
def write_table(table, filename):
"""
Write a table to a file.
Parameters
----------
table : Table
Table to be written
filename : str
Destination for saving table.
Returns
-------
None
""" |
try:
if os.path.exists(filename):
os.remove(filename)
table.write(filename)
log.info("Wrote {0}".format(filename))
except Exception as e:
if "Format could not be identified" not in e.message:
raise e
else:
fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.'
raise Exception("Cannot auto-determine format for {0}".format(fmt))
return |
<SYSTEM_TASK:>
Convert a table of data into a list of sources.
<END_TASK>
<USER_TASK:>
Description:
def table_to_source_list(table, src_type=OutputSource):
"""
Convert a table of data into a list of sources.
A single table must have consistent source types given by src_type. src_type should be one of
:class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`,
or :class:`AegeanTools.models.IslandSource`.
Parameters
----------
table : Table
Table of sources
src_type : class
Sources must be of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
Returns
-------
sources : list
A list of objects of the given type.
""" |
source_list = []
if table is None:
return source_list
for row in table:
# Initialise our object
src = src_type()
# look for the columns required by our source object
for param in src_type.names:
if param in table.colnames:
# copy the value to our object
val = row[param]
# hack around float32's broken-ness
if isinstance(val, np.float32):
val = np.float64(val)
setattr(src, param, val)
# save this object to our list of sources
source_list.append(src)
return source_list |
<SYSTEM_TASK:>
Convert a table into a FITSTable and then write to disk.
<END_TASK>
<USER_TASK:>
Description:
def writeFITSTable(filename, table):
"""
Convert a table into a FITSTable and then write to disk.
Parameters
----------
filename : str
Filename to write.
table : Table
Table to write.
Returns
-------
None
Notes
-----
Due to a bug in numpy, `int32` and `float32` are converted to `int64` and `float64` before writing.
""" |
def FITSTableType(val):
"""
Return the FITSTable type corresponding to each named parameter in obj
"""
if isinstance(val, bool):
types = "L"
elif isinstance(val, (int, np.int64, np.int32)):
types = "J"
elif isinstance(val, (float, np.float64, np.float32)):
types = "E"
elif isinstance(val, six.string_types):
types = "{0}A".format(len(val))
else:
log.warning("Column {0} is of unknown type {1}".format(val, type(val)))
log.warning("Using 5A")
types = "5A"
return types
cols = []
for name in table.colnames:
cols.append(fits.Column(name=name, format=FITSTableType(table[name][0]), array=table[name]))
cols = fits.ColDefs(cols)
tbhdu = fits.BinTableHDU.from_columns(cols)
for k in table.meta:
tbhdu.header['HISTORY'] = ':'.join((k, table.meta[k]))
tbhdu.writeto(filename, overwrite=True) |
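A minimal sketch using an astropy Table with hypothetical columns and output name:

from astropy.table import Table
t = Table({'ra': [180.0, 180.1], 'dec': [-45.0, -45.1]})
writeFITSTable('sources.fits', t)  # float64 columns are written with format 'E'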
<SYSTEM_TASK:>
Write an output file in ds9 .reg format that outlines the boundaries of each island.
<END_TASK>
<USER_TASK:>
Description:
def writeIslandContours(filename, catalog, fmt='reg'):
"""
Write an output file in ds9 .reg format that outlines the boundaries of each island.
Parameters
----------
filename : str
Filename to write.
catalog : list
List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
fmt : str
Output format type. Currently only 'reg' is supported (default)
Returns
-------
None
See Also
--------
:func:`AegeanTools.catalogs.writeIslandBoxes`
""" |
if fmt != 'reg':
log.warning("Format {0} not yet supported".format(fmt))
log.warning("not writing anything")
return
out = open(filename, 'w')
print("#Aegean island contours", file=out)
print("#AegeanTools.catalogs version {0}-({1})".format(__version__, __date__), file=out)
line_fmt = 'image;line({0},{1},{2},{3})'
text_fmt = 'fk5; text({0},{1}) # text={{{2}}}'
mas_fmt = 'image; line({1},{0},{3},{2}) #color = yellow'
x_fmt = 'image; point({1},{0}) # point=x'
for c in catalog:
contour = c.contour
if len(contour) > 1:
for p1, p2 in zip(contour[:-1], contour[1:]):
print(line_fmt.format(p1[1] + 0.5, p1[0] + 0.5, p2[1] + 0.5, p2[0] + 0.5), file=out)
print(line_fmt.format(contour[-1][1] + 0.5, contour[-1][0] + 0.5, contour[0][1] + 0.5,
contour[0][0] + 0.5), file=out)
# comment out lines that have invalid ra/dec (WCS problems)
if not np.all(np.isfinite([c.ra, c.dec])):
print('#', end=' ', file=out)
# some islands may not have anchors because they don't have any contours
if len(c.max_angular_size_anchors) == 4:
print(text_fmt.format(c.ra, c.dec, c.island), file=out)
print(mas_fmt.format(*[a + 0.5 for a in c.max_angular_size_anchors]), file=out)
for p1, p2 in c.pix_mask:
# DS9 uses 1-based instead of 0-based indexing
print(x_fmt.format(p1 + 1, p2 + 1), file=out)
out.close()
return |
<SYSTEM_TASK:>
Write an output file in ds9 .reg, or kvis .ann format that contains bounding boxes for all the islands.
<END_TASK>
<USER_TASK:>
Description:
def writeIslandBoxes(filename, catalog, fmt):
"""
Write an output file in ds9 .reg, or kvis .ann format that contains bounding boxes for all the islands.
Parameters
----------
filename : str
Filename to write.
catalog : list
List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
fmt : str
Output format type. Currently only 'reg' and 'ann' are supported. Default = 'reg'.
Returns
-------
None
See Also
--------
:func:`AegeanTools.catalogs.writeIslandContours`
""" |
if fmt not in ['reg', 'ann']:
log.warning("Format not supported for island boxes{0}".format(fmt))
return # fmt not supported
out = open(filename, 'w')
print("#Aegean Islands", file=out)
print("#Aegean version {0}-({1})".format(__version__, __date__), file=out)
if fmt == 'reg':
print("IMAGE", file=out)
box_fmt = 'box({0},{1},{2},{3}) #{4}'
else:
print("COORD P", file=out)
box_fmt = 'box P {0} {1} {2} {3} #{4}'
for c in catalog:
# x/y swap for pyfits/numpy translation
ymin, ymax, xmin, xmax = c.extent
# +1 for array/image offset
xcen = (xmin + xmax) / 2.0 + 1
# + 0.5 in each direction to make lines run 'between' DS9 pixels
xwidth = xmax - xmin + 1
ycen = (ymin + ymax) / 2.0 + 1
ywidth = ymax - ymin + 1
print(box_fmt.format(xcen, ycen, xwidth, ywidth, c.island), file=out)
out.close()
return |
<SYSTEM_TASK:>
Output an sqlite3 database containing one table for each source type
<END_TASK>
<USER_TASK:>
Description:
def writeDB(filename, catalog, meta=None):
"""
Output an sqlite3 database containing one table for each source type
Parameters
----------
filename : str
Output filename
catalog : list
List of sources of type :class:`AegeanTools.models.OutputSource`,
:class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`.
meta : dict
Meta data to be written to table `meta`
Returns
-------
None
""" |
def sqlTypes(obj, names):
"""
Return the sql type corresponding to each named parameter in obj
"""
types = []
for n in names:
val = getattr(obj, n)
if isinstance(val, bool):
types.append("BOOL")
elif isinstance(val, (int, np.int64, np.int32)):
types.append("INT")
elif isinstance(val, (float, np.float64, np.float32)): # float32 is bugged and claims not to be a float
types.append("FLOAT")
elif isinstance(val, six.string_types):
types.append("VARCHAR")
else:
log.warning("Column {0} is of unknown type {1}".format(n, type(n)))
log.warning("Using VARCHAR")
types.append("VARCHAR")
return types
if os.path.exists(filename):
log.warning("overwriting {0}".format(filename))
os.remove(filename)
conn = sqlite3.connect(filename)
db = conn.cursor()
# determine the column names by inspecting the catalog class
for t, tn in zip(classify_catalog(catalog), ["components", "islands", "simples"]):
if len(t) < 1:
continue #don't write empty tables
col_names = t[0].names
col_types = sqlTypes(t[0], col_names)
stmnt = ','.join(["{0} {1}".format(a, b) for a, b in zip(col_names, col_types)])
db.execute('CREATE TABLE {0} ({1})'.format(tn, stmnt))
stmnt = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(tn, ','.join(col_names), ','.join(['?' for i in col_names]))
# expand the iterators that are created by python 3+
data = list(map(nulls, list(r.as_list() for r in t)))
db.executemany(stmnt, data)
log.info("Created table {0}".format(tn))
# add some meta data
meta = meta if meta is not None else {}
db.execute("CREATE TABLE meta (key VARCHAR, val VARCHAR)")
for k in meta:
db.execute("INSERT INTO meta (key, val) VALUES (?,?)", (k, meta[k]))
conn.commit()
log.info(db.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall())
conn.close()
log.info("Wrote file {0}".format(filename))
return |
<SYSTEM_TASK:>
Calculate the normalised distance between two sources.
<END_TASK>
<USER_TASK:>
Description:
def norm_dist(src1, src2):
"""
Calculate the normalised distance between two sources.
Sources are elliptical Gaussians.
The normalised distance is calculated as the GCD distance between the centers,
divided by the quadrature sum of the radius of each ellipse along a line joining the two ellipses.
For two equal-size ellipses that touch at a single point, the normalised distance will be sqrt(2).
Parameters
----------
src1, src2 : object
The two positions to compare. Objects must have the following parameters: (ra, dec, a, b, pa).
Returns
-------
dist: float
The normalised distance.
""" |
if np.all(src1 == src2):
return 0
dist = gcd(src1.ra, src1.dec, src2.ra, src2.dec) # degrees
# the angle between the ellipse centers
phi = bear(src1.ra, src1.dec, src2.ra, src2.dec) # Degrees
# Calculate the radius of each ellipse along a line that joins their centers.
r1 = src1.a*src1.b / np.hypot(src1.a * np.sin(np.radians(phi - src1.pa)),
src1.b * np.cos(np.radians(phi - src1.pa)))
r2 = src2.a*src2.b / np.hypot(src2.a * np.sin(np.radians(180 + phi - src2.pa)),
src2.b * np.cos(np.radians(180 + phi - src2.pa)))
R = dist / (np.hypot(r1, r2) / 3600)
return R |
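A worked sketch using types.SimpleNamespace as a stand-in source (any object with ra, dec, a, b, pa attributes works; the module's gcd and bear helpers from AegeanTools.angle_tools are assumed to be in scope). Two touching circular sources give sqrt(2):

from types import SimpleNamespace
s1 = SimpleNamespace(ra=180.0, dec=-45.0, a=30.0, b=30.0, pa=0.0)  # circular, 30" radius
s2 = SimpleNamespace(ra=180.0, dec=-45.0 + 60/3600.0, a=30.0, b=30.0, pa=0.0)
# centers are 60" apart and each radius along the join is 30",
# so norm_dist = 60 / hypot(30, 30) = sqrt(2) ~ 1.414
print(norm_dist(s1, s2))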
<SYSTEM_TASK:>
Great circle distance between two sources.
<END_TASK>
<USER_TASK:>
Description:
def sky_dist(src1, src2):
"""
Great circle distance between two sources.
A check is made to determine if the two sources are the same object, in this case
the distance is zero.
Parameters
----------
src1, src2 : object
Two sources to check. Objects must have parameters (ra,dec) in degrees.
Returns
-------
distance : float
The distance between the two sources.
See Also
--------
:func:`AegeanTools.angle_tools.gcd`
""" |
if np.all(src1 == src2):
return 0
return gcd(src1.ra, src1.dec, src2.ra, src2.dec) |
<SYSTEM_TASK:>
Do a pairwise comparison of all sources and determine if they have a normalized distance within
<END_TASK>
<USER_TASK:>
Description:
def pairwise_ellpitical_binary(sources, eps, far=None):
"""
Do a pairwise comparison of all sources and determine if they have a normalized distance within
eps.
Form this into a matrix of shape NxN.
Parameters
----------
sources : list
A list of sources (objects with parameters: ra,dec,a,b,pa)
eps : float
Normalised distance constraint.
far : float
If sources have a dec that differs by more than this amount then they are considered to be not matched.
This is a short-cut around performing GCD calculations.
Returns
-------
prob : numpy.ndarray
A 2d array of True/False.
See Also
--------
:func:`AegeanTools.cluster.norm_dist`
""" |
if far is None:
far = max(a.a/3600 for a in sources)
l = len(sources)
distances = np.zeros((l, l), dtype=bool)
for i in range(l):
for j in range(i, l):
if i == j:
distances[i, j] = False
continue
src1 = sources[i]
src2 = sources[j]
if src2.dec - src1.dec > far:
break
if abs(src2.ra - src1.ra)*np.cos(np.radians(src1.dec)) > far:
continue
distances[i, j] = norm_dist(src1, src2) > eps
distances[j, i] = distances[i, j]
return distances |
<SYSTEM_TASK:>
Regroup the islands of a catalog according to their normalised distance.
<END_TASK>
<USER_TASK:>
Description:
def regroup_vectorized(srccat, eps, far=None, dist=norm_dist):
"""
Regroup the islands of a catalog according to their normalised distance.
Assumes srccat is recarray-like for efficiency.
Return a list of island groups.
Parameters
----------
srccat : np.rec.array or pd.DataFrame
Should have the following fields[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be
grouped
far : float
(degrees) sources that are further than this distance apart will not
be grouped, and will not be tested.
Default = 0.5.
dist : func
a function that calculates the distance between a source and each
element of an array of sources.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list of lists
Each island contains integer indices for members from srccat
(in descending dec order).
""" |
if far is None:
far = 0.5 # 10*max(a.a/3600 for a in srccat)
# most negative declination first
# XXX: kind='mergesort' ensures stable sorting for determinism.
# Do we need this?
order = np.argsort(srccat.dec, kind='mergesort')[::-1]
# TODO: is it better to store groups as arrays even if appends are more
# costly?
groups = [[order[0]]]
for idx in order[1:]:
rec = srccat[idx]
# TODO: Find out if groups are big enough for this to give us a speed
# gain. If not, get distance to all entries in groups above
# decmin simultaneously.
decmin = rec.dec - far
for group in reversed(groups):
# when an island's largest (last) declination is smaller than
# decmin, we don't need to look at any more islands
if srccat.dec[group[-1]] < decmin:
# new group
groups.append([idx])
break
rafar = far / np.cos(np.radians(rec.dec))
group_recs = np.take(srccat, group, mode='clip')
group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]
if len(group_recs) and dist(rec, group_recs).min() < eps:
group.append(idx)
break
else:
# new group
groups.append([idx])
# TODO?: a more numpy-like interface would return only an array providing
# the mapping:
# group_idx = np.empty(len(srccat), dtype=int)
# for i, group in enumerate(groups):
# group_idx[group] = i
# return group_idx
return groups |
<SYSTEM_TASK:>
Load a file from disk and return an HDUList
<END_TASK>
<USER_TASK:>
Description:
def load_file_or_hdu(filename):
"""
Load a file from disk and return an HDUList
If filename is already an HDUList return that instead
Parameters
----------
filename : str or HDUList
File or HDU to be loaded
Returns
-------
hdulist : HDUList
""" |
if isinstance(filename, fits.HDUList):
hdulist = filename
else:
hdulist = fits.open(filename, ignore_missing_end=True)
return hdulist |
<SYSTEM_TASK:>
Compress a file using decimation.
<END_TASK>
<USER_TASK:>
Description:
def compress(datafile, factor, outfile=None):
"""
Compress a file using decimation.
Parameters
----------
datafile : str or HDUList
Input data to be loaded. (HDUList will be modified if passed).
factor : int
Decimation factor.
outfile : str
File to be written. Default = None, which means don't write a file.
Returns
-------
hdulist : HDUList
A decimated HDUList
See Also
--------
:func:`AegeanTools.fits_interp.expand`
""" |
if not (factor > 0 and isinstance(factor, int)):
logging.error("factor must be a positive integer")
return None
hdulist = load_file_or_hdu(datafile)
header = hdulist[0].header
data = np.squeeze(hdulist[0].data)
cx, cy = data.shape[0], data.shape[1]
nx = cx // factor
ny = cy // factor
# check to see if we will have some residual data points
lcx = cx % factor
lcy = cy % factor
if lcx > 0:
nx += 1
if lcy > 0:
ny += 1
# decimate the data
new_data = np.empty((nx + 1, ny + 1))
new_data[:nx, :ny] = data[::factor, ::factor]
# copy the last row/col across
new_data[-1, :ny] = data[-1, ::factor]
new_data[:nx, -1] = data[::factor, -1]
new_data[-1, -1] = data[-1, -1]
# TODO: Figure out what to do when CD2_1 and CD1_2 are non-zero
if 'CDELT1' in header:
header['CDELT1'] *= factor
elif 'CD1_1' in header:
header['CD1_1'] *= factor
else:
logging.error("Error: Can't find CDELT1 or CD1_1")
return None
if 'CDELT2' in header:
header['CDELT2'] *= factor
elif "CD2_2" in header:
header['CD2_2'] *= factor
else:
logging.error("Error: Can't find CDELT2 or CD2_2")
return None
# Move the reference pixel so that the WCS is correct
header['CRPIX1'] = (header['CRPIX1'] + factor - 1) / factor
header['CRPIX2'] = (header['CRPIX2'] + factor - 1) / factor
# Update the header so that we can do the correct interpolation later on
header['BN_CFAC'] = (factor, "Compression factor (grid size) used by BANE")
header['BN_NPX1'] = (header['NAXIS1'], 'original NAXIS1 value')
header['BN_NPX2'] = (header['NAXIS2'], 'original NAXIS2 value')
header['BN_RPX1'] = (lcx, 'Residual on axis 1')
header['BN_RPX2'] = (lcy, 'Residual on axis 2')
header['HISTORY'] = "Compressed by a factor of {0}".format(factor)
# save the changes
hdulist[0].data = np.array(new_data, dtype=np.float32)
hdulist[0].header = header
if outfile is not None:
hdulist.writeto(outfile, overwrite=True)
logging.info("Wrote: {0}".format(outfile))
return hdulist |
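Usage sketch with hypothetical file names:

hdulist = compress('image.fits', factor=10, outfile='image_small.fits')
# every 10th pixel is kept (plus the last row/column), and BN_* header
# keywords record how to undo the decimation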
<SYSTEM_TASK:>
Expand and interpolate the given data file using the given method.
<END_TASK>
<USER_TASK:>
Description:
def expand(datafile, outfile=None):
"""
Expand and interpolate the given data file using the given method.
Datafile can be a filename or an HDUList
It is assumed that the file has been compressed and that there are `BN_?` keywords in the
fits header that describe how the compression was done.
Parameters
----------
datafile : str or HDUList
filename or HDUList of file to work on
outfile : str
filename to write to (default = None)
Returns
-------
hdulist : HDUList
HDUList of the expanded data.
See Also
--------
:func:`AegeanTools.fits_interp.compress`
""" |
hdulist = load_file_or_hdu(datafile)
header = hdulist[0].header
data = hdulist[0].data
# Check for the required key words, only expand if they exist
if not all(a in header for a in ['BN_CFAC', 'BN_NPX1', 'BN_NPX2', 'BN_RPX1', 'BN_RPX2']):
return hdulist
factor = header['BN_CFAC']
(gx, gy) = np.mgrid[0:header['BN_NPX2'], 0:header['BN_NPX1']]
# fix the last column of the grid to account for residuals
lcx = header['BN_RPX2']
lcy = header['BN_RPX1']
rows = (np.arange(data.shape[0]) + int(lcx/factor))*factor
cols = (np.arange(data.shape[1]) + int(lcy/factor))*factor
# Do the interpolation
hdulist[0].data = np.array(RegularGridInterpolator((rows,cols), data)((gx, gy)), dtype=np.float32)
# update the fits keywords so that the WCS is correct
header['CRPIX1'] = (header['CRPIX1'] - 1) * factor + 1
header['CRPIX2'] = (header['CRPIX2'] - 1) * factor + 1
if 'CDELT1' in header:
header['CDELT1'] /= factor
elif 'CD1_1' in header:
header['CD1_1'] /= factor
else:
logging.error("Error: Can't find CD1_1 or CDELT1")
return None
if 'CDELT2' in header:
header['CDELT2'] /= factor
elif "CD2_2" in header:
header['CD2_2'] /= factor
else:
logging.error("Error: Can't find CDELT2 or CD2_2")
return None
header['HISTORY'] = 'Expanded by factor {0}'.format(factor)
# don't need these any more so delete them.
del header['BN_CFAC'], header['BN_NPX1'], header['BN_NPX2'], header['BN_RPX1'], header['BN_RPX2']
hdulist[0].header = header
if outfile is not None:
hdulist.writeto(outfile, overwrite=True)
logging.info("Wrote: {0}".format(outfile))
return hdulist |
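A round-trip sketch (hypothetical file name); expand undoes compress using the BN_* keywords:

small = compress('image.fits', factor=10)
restored = expand(small)
# restored[0].data is back on the original NAXIS1 x NAXIS2 grid,
# filled in by interpolation of the decimated values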
<SYSTEM_TASK:>
Strip and make a string case insensitive and ensure it is either 'true' or 'false'.
<END_TASK>
<USER_TASK:>
Description:
def change_autocommit_mode(self, switch):
"""
Strip and make a string case insensitive and ensure it is either 'true' or 'false'.
If neither, prompt user for either value.
When 'true', return True, and when 'false' return False.
""" |
parsed_switch = switch.strip().lower()
if parsed_switch not in ('true', 'false'):
self.send_response(
self.iopub_socket, 'stream', {
'name': 'stderr',
'text': 'autocommit must be true or false.\n\n'
}
)
return
switch_bool = (parsed_switch == 'true')
committed = self.switch_autocommit(switch_bool)
message = (
('committed current transaction & ' if committed else '') +
'switched autocommit mode to ' +
str(self._autocommit)
)
self.send_response(
self.iopub_socket, 'stream', {
'name': 'stderr',
'text': message,
}
) |
<SYSTEM_TASK:>
Deconstruct the field for Django 1.7+ migrations.
<END_TASK>
<USER_TASK:>
Description:
def deconstruct(self):
"""
Deconstruct the field for Django 1.7+ migrations.
""" |
name, path, args, kwargs = super(BaseEncryptedField, self).deconstruct()
kwargs.update({
#'key': self.cipher_key,
'cipher': self.cipher_name,
'charset': self.charset,
'check_armor': self.check_armor,
'versioned': self.versioned,
})
return name, path, args, kwargs |
<SYSTEM_TASK:>
Better than excluding everything that is not needed,
<END_TASK>
<USER_TASK:>
Description:
def find_packages_by_root_package(where):
"""Better than excluding everything that is not needed,
collect only what is needed.
""" |
root_package = os.path.basename(where)
packages = [ "%s.%s" % (root_package, sub_package)
for sub_package in find_packages(where)]
packages.insert(0, root_package)
return packages |
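Usage sketch with a hypothetical package directory (assumes setuptools' find_packages is imported, as the function requires):

packages = find_packages_by_root_package('mypkg')
# -> ['mypkg', 'mypkg.sub1', 'mypkg.sub2', ...]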
<SYSTEM_TASK:>
click_ is a framework to simplify writing composable commands for
<END_TASK>
<USER_TASK:>
Description:
def make_long_description(marker=None, intro=None):
"""
click_ is a framework to simplify writing composable commands for
command-line tools. This package extends the click_ functionality
by adding support for commands that use configuration files.
.. _click: https://click.pocoo.org/
EXAMPLE:
A configuration file, like:
.. code-block:: INI
# -- FILE: foo.ini
[foo]
flag = yes
name = Alice and Bob
numbers = 1 4 9 16 25
filenames = foo/xxx.txt
bar/baz/zzz.txt
[person.alice]
name = Alice
birthyear = 1995
[person.bob]
name = Bob
birthyear = 2001
can be processed with:
.. code-block:: python
# EXAMPLE:
""" |
if intro is None:
intro = inspect.getdoc(make_long_description)
with open("README.rst", "r") as infile:
line = infile.readline()
while not line.strip().startswith(marker):
line = infile.readline()
# -- COLLECT REMAINING: Usage example
contents = infile.read()
text = intro +"\n" + contents
return text |
<SYSTEM_TASK:>
Pops a message for a subscribed client.
<END_TASK>
<USER_TASK:>
Description:
def pubsub_pop_message(self, deadline=None):
"""Pops a message for a subscribed client.
Args:
deadline (int): max number of seconds to wait (None => no timeout)
Returns:
Future with the popped message as result (or None if timeout
or ConnectionError object in case of connection errors
or ClientError object if you are not subscribed)
""" |
if not self.subscribed:
excep = ClientError("you must subscribe before using "
"pubsub_pop_message")
raise tornado.gen.Return(excep)
reply = None
try:
reply = self._reply_list.pop(0)
raise tornado.gen.Return(reply)
except IndexError:
pass
if deadline is not None:
td = timedelta(seconds=deadline)
yield self._condition.wait(timeout=td)
else:
yield self._condition.wait()
try:
reply = self._reply_list.pop(0)
except IndexError:
pass
raise tornado.gen.Return(reply) |
<SYSTEM_TASK:>
This is a helper function to recover the coordinates of regions that have
<END_TASK>
<USER_TASK:>
Description:
def _get_flat_ids(assigned):
"""
This is a helper function to recover the coordinates of regions that have
been labeled within an image. This function efficiently computes the
coordinate of all regions and returns the information in a memory-efficient
manner.
Parameters
-----------
assigned : ndarray[ndim=2, dtype=int]
The labeled image. For example, the result of calling
scipy.ndimage.label on a binary image
Returns
--------
I : ndarray[ndim=1, dtype=int]
Array of 1d coordinate indices of all regions in the image
region_ids : ndarray[shape=[n_features + 1], dtype=int]
Indexing array used to separate the coordinates of the different
regions. For example, region k has xy coordinates of
xy[region_ids[k]:region_ids[k+1], :]
labels : ndarray[ndim=1, dtype=int]
The labels of the regions in the image corresponding to the coordinates
For example, assigned.ravel()[I[k]] == labels[k]
""" |
# MPU optimization:
# Let's segment the regions and store in a sparse format
# First, let's use where once to find all the information we want
ids_labels = np.arange(assigned.size, dtype='int64')
I = ids_labels[assigned.ravel().astype(bool)]
labels = assigned.ravel()[I]
# Now sort these arrays by the label to figure out where to segment
sort_id = np.argsort(labels)
labels = labels[sort_id]
I = I[sort_id]
# this should be of size n_features-1
region_ids = np.where(labels[1:] - labels[:-1] > 0)[0] + 1
# This should be of size n_features + 1
region_ids = np.concatenate(([0], region_ids, [len(labels)]))
return [I, region_ids, labels] |
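A small worked sketch (the function is shown here as a free function; numpy and scipy are assumed to be importable):

import numpy as np
from scipy.ndimage import label
assigned, n = label(np.array([[0, 1, 1], [0, 0, 0], [1, 0, 1]]))
I, region_ids, labels = _get_flat_ids(assigned)
# region k occupies raveled coordinates I[region_ids[k]:region_ids[k+1]]
# here: region 0 -> [1, 2], region 1 -> [6], region 2 -> [8]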
<SYSTEM_TASK:>
This function gives the magnitude and direction of the slope based on
<END_TASK>
<USER_TASK:>
Description:
def _calc_direction(data, mag, direction, ang, d1, d2, theta,
slc0, slc1, slc2):
"""
This function gives the magnitude and direction of the slope based on
Tarboton's D-infinity method. This is a helper-function to
_tarboton_slopes_directions
""" |
data0 = data[slc0]
data1 = data[slc1]
data2 = data[slc2]
s1 = (data0 - data1) / d1
s2 = (data1 - data2) / d2
s1_2 = s1**2
sd = (data0 - data2) / np.sqrt(d1**2 + d2**2)
r = np.arctan2(s2, s1)
rad2 = s1_2 + s2**2
# Handle special cases
# should be on diagonal
b_s1_lte0 = s1 <= 0
b_s2_lte0 = s2 <= 0
b_s1_gt0 = s1 > 0
b_s2_gt0 = s2 > 0
I1 = (b_s1_lte0 & b_s2_gt0) | (r > theta)
if I1.any():
rad2[I1] = sd[I1] ** 2
r[I1] = theta.repeat(I1.shape[1], 1)[I1]
I2 = (b_s1_gt0 & b_s2_lte0) | (r < 0) # should be on straight section
if I2.any():
rad2[I2] = s1_2[I2]
r[I2] = 0
I3 = b_s1_lte0 & (b_s2_lte0 | (b_s2_gt0 & (sd <= 0))) # upslope or flat
rad2[I3] = -1
I4 = rad2 > mag[slc0]
if I4.any():
mag[slc0][I4] = rad2[I4]
direction[slc0][I4] = r[I4] * ang[1] + ang[0] * np.pi/2
return mag, direction |
<SYSTEM_TASK:>
Assigns data on the i'th tile to the data 'field' of the 'side'
<END_TASK>
<USER_TASK:>
Description:
def set_i(self, i, data, field, side):
""" Assigns data on the i'th tile to the data 'field' of the 'side'
edge of that tile
""" |
edge = self.get_i(i, side)
setattr(edge, field, data[edge.slice]) |
<SYSTEM_TASK:>
Assign data on the 'key' tile to all the edges
<END_TASK>
<USER_TASK:>
Description:
def set_sides(self, key, data, field, local=False):
"""
Assign data on the 'key' tile to all the edges
""" |
for side in ['left', 'right', 'top', 'bottom']:
self.set(key, data, field, side, local) |
<SYSTEM_TASK:>
Assign data from the 'key' tile to the edge on the
<END_TASK>
<USER_TASK:>
Description:
def set_neighbor_data(self, neighbor_side, data, key, field):
"""
Assign data from the 'key' tile to the edge on the
neighboring tile which is on the 'neighbor_side' of the 'key' tile.
The data is assigned to the 'field' attribute of the neighboring tile's
edge.
""" |
i = self.keys[key]
found = False
sides = []
if 'left' in neighbor_side:
if i % self.n_cols == 0:
return None
i -= 1
sides.append('right')
found = True
if 'right' in neighbor_side:
if i % self.n_cols == self.n_cols - 1:
return None
i += 1
sides.append('left')
found = True
if 'top' in neighbor_side:
sides.append('bottom')
i -= self.n_cols
found = True
if 'bottom' in neighbor_side:
sides.append('top')
i += self.n_cols
found = True
if not found:
print "Side '%s' not found" % neighbor_side
# Check if i is in range
if i < 0 or i >= self.n_chunks:
return None
# Otherwise, set the data
for side in sides:
self.set_i(i, data, field, side) |
<SYSTEM_TASK:>
Given the 'key' tile's data, assigns this information to all
<END_TASK>
<USER_TASK:>
Description:
def set_all_neighbors_data(self, data, done, key):
"""
Given the 'key' tile's data, assigns this information to all
neighboring tiles
""" |
# The order of this for loop is important because the topleft gets
# its data from the left neighbor, which should have already been
# updated...
for side in ['left', 'right', 'top', 'bottom', 'topleft',
'topright', 'bottomleft', 'bottomright']:
self.set_neighbor_data(side, data, key, 'data')
# self.set_neighbor_data(side, todo, key, 'todo')
self.set_neighbor_data(side, done, key, 'done') |
<SYSTEM_TASK:>
Calculate and record the number of edge pixels left to do on each tile
<END_TASK>
<USER_TASK:>
Description:
def fill_n_todo(self):
"""
Calculate and record the number of edge pixels left to do on each tile
""" |
left = self.left
right = self.right
top = self.top
bottom = self.bottom
for i in range(self.n_chunks):
self.n_todo.ravel()[i] = np.sum([left.ravel()[i].n_todo,
right.ravel()[i].n_todo,
top.ravel()[i].n_todo,
bottom.ravel()[i].n_todo]) |
<SYSTEM_TASK:>
Calculate and record the number of edge pixels that are done on each
<END_TASK>
<USER_TASK:>
Description:
def fill_n_done(self):
"""
Calculate and record the number of edge pixels that are done on each
tile.
""" |
left = self.left
right = self.right
top = self.top
bottom = self.bottom
for i in range(self.n_chunks):
self.n_done.ravel()[i] = np.sum([left.ravel()[i].n_done,
right.ravel()[i].n_done,
top.ravel()[i].n_done,
bottom.ravel()[i].n_done]) |
<SYSTEM_TASK:>
Calculate the percentage of edge pixels that would be done if the tile
<END_TASK>
<USER_TASK:>
Description:
def fill_percent_done(self):
"""
Calculate the percentage of edge pixels that would be done if the tile
was reprocessed. This is done for each tile.
""" |
left = self.left
right = self.right
top = self.top
bottom = self.bottom
for i in range(self.n_chunks):
self.percent_done.ravel()[i] = \
np.sum([left.ravel()[i].percent_done,
right.ravel()[i].percent_done,
top.ravel()[i].percent_done,
bottom.ravel()[i].percent_done])
self.percent_done.ravel()[i] /= \
np.sum([left.ravel()[i].percent_done > 0,
right.ravel()[i].percent_done > 0,
top.ravel()[i].percent_done > 0,
bottom.ravel()[i].percent_done > 0, 1e-16]) |
<SYSTEM_TASK:>
Fixes the shape of the data fields on edges. Left edges should be
<END_TASK>
<USER_TASK:>
Description:
def fix_shapes(self):
"""
Fixes the shape of the data fields on edges. Left edges should be
column vectors, and top edges should be row vectors, for example.
""" |
for i in range(self.n_chunks):
for side in ['left', 'right', 'top', 'bottom']:
edge = getattr(self, side).ravel()[i]
if side in ['left', 'right']:
shp = [edge.todo.size, 1]
else:
shp = [1, edge.todo.size]
edge.done = edge.done.reshape(shp)
edge.data = edge.data.reshape(shp)
edge.todo = edge.todo.reshape(shp) |
<SYSTEM_TASK:>
Determine which tile, when processed, would complete the largest
<END_TASK>
<USER_TASK:>
Description:
def find_best_candidate(self):
"""
Determine which tile, when processed, would complete the largest
percentage of unresolved edge pixels. This is a heuristic function
and does not give the optimal tile.
""" |
self.fill_percent_done()
i_b = np.argmax(self.percent_done.ravel())
if self.percent_done.ravel()[i_b] <= 0:
return None
# check for ties
I = self.percent_done.ravel() == self.percent_done.ravel()[i_b]
if I.sum() == 1:
return i_b
else:
I2 = np.argmax(self.max_elev.ravel()[I])
return I.nonzero()[0][I2] |
<SYSTEM_TASK:>
Standard array saving routine
<END_TASK>
<USER_TASK:>
Description:
def save_array(self, array, name=None, partname=None, rootpath='.',
raw=False, as_int=True):
"""
Standard array saving routine
Parameters
-----------
array : array
Array to save to file
name : str, optional
Default 'array.tif'. Filename of array to save. Over-writes
partname.
partname : str, optional
Part of the filename to save (with the coordinates appended)
rootpath : str, optional
Default '.'. Which directory to save file
raw : bool, optional
Default False. If true will save a .npz of the array. If false,
will save a geotiff
as_int : bool, optional
Default True. If true will save array as an integer array (
excellent compression). If false will save as float array.
""" |
if name is None and partname is not None:
fnl_file = self.get_full_fn(partname, rootpath)
tmp_file = os.path.join(rootpath, partname,
self.get_fn(partname + '_tmp'))
elif name is not None:
fnl_file = name
tmp_file = fnl_file + '_tmp.tiff'
else:
fnl_file = 'array.tif'
tmp_file = fnl_file + '_tmp.tiff'
if not raw:
s_file = self.elev.clone_traits()
s_file.raster_data = np.ma.masked_array(array)
count = 10
while count > 0 and (s_file.raster_data.mask.sum() > 0 \
or np.isnan(s_file.raster_data).sum() > 0):
s_file.inpaint()
count -= 1
s_file.export_to_geotiff(tmp_file)
if as_int:
cmd = "gdalwarp -multi -wm 2000 -co BIGTIFF=YES -of GTiff -co compress=lzw -ot Int16 -co TILED=YES -wo OPTIMIZE_SIZE=YES -r near -t_srs %s %s %s" \
% (self.save_projection, tmp_file, fnl_file)
else:
cmd = "gdalwarp -multi -wm 2000 -co BIGTIFF=YES -of GTiff -co compress=lzw -co TILED=YES -wo OPTIMIZE_SIZE=YES -r near -t_srs %s %s %s" \
% (self.save_projection, tmp_file, fnl_file)
print "<<"*4, cmd, ">>"*4
subprocess.call(cmd)
os.remove(tmp_file)
else:
np.savez_compressed(fnl_file, array) |
<SYSTEM_TASK:>
Saves the upstream contributing area to a file
<END_TASK>
<USER_TASK:>
Description:
def save_uca(self, rootpath, raw=False, as_int=False):
""" Saves the upstream contributing area to a file
""" |
self.save_array(self.uca, None, 'uca', rootpath, raw, as_int=as_int) |
<SYSTEM_TASK:>
Saves the topographic wetness index to a file
<END_TASK>
<USER_TASK:>
Description:
def save_twi(self, rootpath, raw=False, as_int=True):
""" Saves the topographic wetness index to a file
""" |
self.twi = np.ma.masked_array(self.twi, mask=self.twi <= 0,
fill_value=-9999)
# self.twi = self.twi.filled()
self.twi[self.flats] = 0
self.twi.mask[self.flats] = True
# self.twi = self.flats
self.save_array(self.twi, None, 'twi', rootpath, raw, as_int=as_int) |
<SYSTEM_TASK:>
Saves the magnitude of the slope to a file
<END_TASK>
<USER_TASK:>
Description:
def save_slope(self, rootpath, raw=False, as_int=False):
""" Saves the magnitude of the slope to a file
""" |
self.save_array(self.mag, None, 'mag', rootpath, raw, as_int=as_int) |
<SYSTEM_TASK:>
Saves the direction of the slope to a file
<END_TASK>
<USER_TASK:>
Description:
def save_direction(self, rootpath, raw=False, as_int=False):
""" Saves the direction of the slope to a file
""" |
self.save_array(self.direction, None, 'ang', rootpath, raw, as_int=as_int) |
<SYSTEM_TASK:>
Saves TWI, UCA, magnitude and direction of slope to files.
<END_TASK>
<USER_TASK:>
Description:
def save_outputs(self, rootpath='.', raw=False):
"""Saves TWI, UCA, magnitude and direction of slope to files.
""" |
self.save_twi(rootpath, raw)
self.save_uca(rootpath, raw)
self.save_slope(rootpath, raw)
self.save_direction(rootpath, raw) |
<SYSTEM_TASK:>
Can only load files that were saved in the 'raw' format.
<END_TASK>
<USER_TASK:>
Description:
def load_array(self, fn, name):
"""
Can only load files that were saved in the 'raw' format.
Loads previously computed field 'name' from file
Valid names are 'mag', 'direction', 'uca', 'twi'
""" |
if os.path.exists(fn + '.npz'):
array = np.load(fn + '.npz')
try:
setattr(self, name, array['arr_0'])
except Exception as e:
print(e)
finally:
array.close()
else:
raise RuntimeError("File %s does not exist." % (fn + '.npz')) |
<SYSTEM_TASK:>
Assign data from a chunk to the full array. The data in overlap regions
<END_TASK>
<USER_TASK:>
Description:
def _assign_chunk(self, data, arr1, arr2, te, be, le, re, ovr, add=False):
"""
Assign data from a chunk to the full array. The data in overlap regions
will not be assigned to the full array
Parameters
-----------
data : array
Unused array (except for shape) that has size of full tile
arr1 : array
Full size array to which data will be assigned
arr2 : array
Chunk-sized array from which data will be assigned
te : int
Top edge id
be : int
Bottom edge id
le : int
Left edge id
re : int
Right edge id
ovr : int
The number of pixels in the overlap
add : bool, optional
Default False. If true, the data in arr2 will be added to arr1,
otherwise data in arr2 will overwrite data in arr1
""" |
if te == 0:
i1 = 0
else:
i1 = ovr
if be == data.shape[0]:
i2 = 0
i2b = None
else:
i2 = -ovr
i2b = -ovr
if le == 0:
j1 = 0
else:
j1 = ovr
if re == data.shape[1]:
j2 = 0
j2b = None
else:
j2 = -ovr
j2b = -ovr
if add:
arr1[te+i1:be+i2, le+j1:re+j2] += arr2[i1:i2b, j1:j2b]
else:
arr1[te+i1:be+i2, le+j1:re+j2] = arr2[i1:i2b, j1:j2b] |
<SYSTEM_TASK:>
Wrapper to pick between various algorithms
<END_TASK>
<USER_TASK:>
Description:
def _slopes_directions(self, data, dX, dY, method='tarboton'):
""" Wrapper to pick between various algorithms
""" |
# %%
if method == 'tarboton':
return self._tarboton_slopes_directions(data, dX, dY)
elif method == 'central':
return self._central_slopes_directions(data, dX, dY) |
<SYSTEM_TASK:>
Extend flats 1 square downstream
<END_TASK>
<USER_TASK:>
Description:
def _find_flats_edges(self, data, mag, direction):
"""
Extend flats 1 square downstream
Flats on the downstream side of the flat might find a valid angle,
but that doesn't mean that it's a correct angle. We have to find
these and then set them equal to a flat
""" |
i12 = np.arange(data.size).reshape(data.shape)
flat = mag == FLAT_ID_INT
flats, n = spndi.label(flat, structure=FLATS_KERNEL3)
objs = spndi.find_objects(flats)
f = flat.ravel()
d = data.ravel()
for i, _obj in enumerate(objs):
region = flats[_obj] == i+1
I = i12[_obj][region]
J = get_adjacent_index(I, data.shape, data.size)
f[J] = d[J] == d[I[0]]
flat = f.reshape(data.shape)
return flat |
<SYSTEM_TASK:>
Does a single step of the upstream contributing area calculation.
<END_TASK>
<USER_TASK:>
Description:
def _drain_step(self, A, ids, area, done, edge_todo):
"""
Does a single step of the upstream contributing area calculation.
Here the pixels in ids are drained downstream, the areas are updated
and the next set of pixels to drain are determined for the next round.
""" |
# Only drain to cells that have a contribution
A_todo = A[:, ids.ravel()]
colsum = np.array(A_todo.sum(1)).ravel()
# Only touch cells that actually receive a contribution
# during this stage
ids_new = colsum != 0
# A cell cannot drain from its own cell twice, but other cells
# may drain into it over multiple iterations. Waiting until a
# cell is marked done ensures that it does not drain downstream
# until every upstream cell has drained into it.
area.ravel()[ids_new] += (A_todo[ids_new, :]
* (area.ravel()[ids].ravel()))
edge_todo.ravel()[ids_new] += (A_todo[ids_new, :]
* (edge_todo.ravel()[ids].ravel()))
# Figure out what's left to do.
done.ravel()[ids] = True
colsum = A * (~done.ravel())
ids = colsum == 0
# Figure out the new-undrained ids
ids = ids & (~done.ravel())
return ids, area, done, edge_todo |
<SYSTEM_TASK:>
Calculates the adjacency of connectivity matrix. This matrix tells
<END_TASK>
<USER_TASK:>
Description:
def _mk_adjacency_matrix(self, section, proportion, flats, elev, mag, dX, dY):
"""
Calculates the adjacency of connectivity matrix. This matrix tells
which pixels drain to which.
For example, the pixel i will receive area from np.nonzero(A[i, :])
at the proportions given in A[i, :]. So, the row gives the pixel
drained to, and the columns give the pixels drained from.
""" |
shp = section.shape
mat_data = np.row_stack((proportion, 1 - proportion))
NN = np.prod(shp)
i12 = np.arange(NN).reshape(shp)
j1 = - np.ones_like(i12)
j2 = - np.ones_like(i12)
# make the connectivity for the non-flats/pits
j1, j2 = self._mk_connectivity(section, i12, j1, j2)
j = np.row_stack((j1, j2))
i = np.row_stack((i12, i12))
# connectivity for flats/pits
if self.drain_pits:
pit_i, pit_j, pit_prop, flats, mag = \
self._mk_connectivity_pits(i12, flats, elev, mag, dX, dY)
j = np.concatenate([j.ravel(), pit_j]).astype('int64')
i = np.concatenate([i.ravel(), pit_i]).astype('int64')
mat_data = np.concatenate([mat_data.ravel(), pit_prop])
elif self.drain_flats:
j1, j2, mat_data, flat_i, flat_j, flat_prop = \
self._mk_connectivity_flats(
i12, j1, j2, mat_data, flats, elev, mag)
j = np.concatenate([j.ravel(), flat_j]).astype('int64')
i = np.concatenate([i.ravel(), flat_i]).astype('int64') # flat_i pairs with i, mirroring the pits branch
mat_data = np.concatenate([mat_data.ravel(), flat_prop])
# Drop no-data values, remove connections that are not present,
# and make sure that floating point precision errors do not
# create circular references where a lower elevation cell drains
# to a higher elevation cell
I = ~np.isnan(mat_data) & (j != -1) & (mat_data > 1e-8) \
& (elev.ravel()[j] <= elev.ravel()[i])
mat_data = mat_data[I]
j = j[I]
i = i[I]
# %%Make the matrix and initialize
# What is A? The row i area receives area contributions from the
# entries in its columns. If all the entries in my columns have
# drained, then I can drain.
A = sps.csc_matrix((mat_data.ravel(),
np.row_stack((j.ravel(), i.ravel()))),
shape=(NN, NN))
normalize = np.array(A.sum(0) + 1e-16).squeeze()
A = A.dot(sps.diags(1.0 / normalize, 0))
return A |
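To make the row/column convention concrete, here is a minimal, hypothetical sketch (not part of the class) of how such a normalized matrix accumulates area down a three-pixel chain:
import numpy as np
import scipy.sparse as sps

# Pixel 0 drains to 1, pixel 1 drains to 2; rows receive, columns give.
A = sps.csc_matrix((np.array([1.0, 1.0]),
                    (np.array([1, 2]),    # j: drained-to rows
                     np.array([0, 1]))),  # i: drained-from columns
                   shape=(3, 3))
area = np.ones(3)  # each pixel contributes one unit of area
uca = area + A.dot(area) + A.dot(A.dot(area))
print(uca)         # [1. 2. 3.]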
<SYSTEM_TASK:>
Calculates the topographic wetness index and saves the result in
<END_TASK>
<USER_TASK:>
Description:
def calc_twi(self):
"""
Calculates the topographic wetness index and saves the result in
self.twi.
Returns
-------
twi : array
Array giving the topographic wetness index at each pixel
""" |
if self.uca is None:
self.calc_uca()
gc.collect() # Just in case
min_area = self.twi_min_area
min_slope = self.twi_min_slope
twi = self.uca.copy()
if self.apply_twi_limits_on_uca:
twi[twi > self.uca_saturation_limit * min_area] = \
self.uca_saturation_limit * min_area
gc.collect() # Just in case
twi = np.log((twi) / (self.mag + min_slope))
# apply the cap
if self.apply_twi_limits:
twi_sat_value = \
np.log(self.uca_saturation_limit * min_area / min_slope)
twi[twi > twi_sat_value] = twi_sat_value
# multiply by 10 for better integer resolution when storing
self.twi = twi * 10
gc.collect() # Just in case
return twi |
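The quantity computed above is the standard ln(a / tan beta) wetness index, scaled by 10 before storage; a quick self-contained check using assumed values:
import numpy as np

uca = 5000.0      # assumed upstream contributing area
mag = 0.05        # assumed slope magnitude (tan beta)
min_slope = 1e-3  # assumed regularization, standing in for twi_min_slope
print(np.log(uca / (mag + min_slope)) * 10)  # ~114.9 as stored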
<SYSTEM_TASK:>
A debug function to plot the direction calculated in various ways.
<END_TASK>
<USER_TASK:>
Description:
def _plot_debug_slopes_directions(self):
"""
A debug function to plot the direction calculated in various ways.
""" |
# %%
from matplotlib.pyplot import matshow, colorbar, clim, title
matshow(self.direction / np.pi * 180); colorbar(); clim(0, 360)
title('Direction')
mag2, direction2 = self._central_slopes_directions()
matshow(direction2 / np.pi * 180.0); colorbar(); clim(0, 360)
title('Direction (central difference)')
matshow(self.mag); colorbar()
title('Magnitude')
matshow(mag2); colorbar(); title("Magnitude (Central difference)")
# %%
# Compare to Taudem
filename = self.file_name
os.chdir('testtiff')
try:
os.remove('test_ang.tif')
os.remove('test_slp.tif')
except OSError:
pass
cmd = ('dinfflowdir -fel "%s" -ang "%s" -slp "%s"' %
(os.path.split(filename)[-1], 'test_ang.tif', 'test_slp.tif'))
taudem._run(cmd)
td_file = GdalReader(file_name='test_ang.tif')
td_ang, = td_file.raster_layers
td_file2 = GdalReader(file_name='test_slp.tif')
td_mag, = td_file2.raster_layers
os.chdir('..')
matshow(td_ang.raster_data / np.pi*180); clim(0, 360); colorbar()
title('Taudem direction')
matshow(td_mag.raster_data); colorbar()
title('Taudem magnitude')
matshow(self.data); colorbar()
title('The test data (elevation)')
diff = (td_ang.raster_data - self.direction) / np.pi * 180.0
diff[np.abs(diff) > 300] = np.nan
matshow(diff); colorbar(); clim([-1, 1])
title('Taudem direction - calculated Direction')
# normalize magnitudes
mag2 = td_mag.raster_data
mag2 /= np.nanmax(mag2)
mag = self.mag.copy()
mag /= np.nanmax(mag)
matshow(mag - mag2); colorbar()
title('Taudem magnitude - calculated magnitude')
del td_file
del td_file2
del td_ang
del td_mag |
<SYSTEM_TASK:>
Cleanup generated document artifacts.
<END_TASK>
<USER_TASK:>
Description:
def clean(ctx, dry_run=False):
"""Cleanup generated document artifacts.""" |
basedir = ctx.sphinx.destdir or "build/docs"
cleanup_dirs([basedir], dry_run=dry_run) |
<SYSTEM_TASK:>
Find the tile neighbors based on filenames
<END_TASK>
<USER_TASK:>
Description:
def find_neighbors(neighbors, coords, I, source_files, f, sides):
"""Find the tile neighbors based on filenames
Parameters
-----------
neighbors : dict
Dictionary that stores the neighbors. Format is
neighbors["source_file_name"]["side"] = "neighbor_source_file_name"
coords : list
List of coordinates determined from the filename.
See :py:func:`utils.parse_fn`
I : array
Sort index. Different sorting schemes will speed up when neighbors
are found
source_files : list
List of strings of source file names
f : callable
Function that determines if two tiles are neighbors based on their
coordinates. f(c1, c2) returns True if tiles are neighbors
sides : list
List of 2 strings that give the "side" where tiles are neighbors.
Returns
-------
neighbors : dict
Dictionary of neighbors
Notes
-------
For example, if Tile1 is to the left of Tile2, then
neighbors['Tile1']['right'] = 'Tile2'
neighbors['Tile2']['left'] = 'Tile1'
""" |
for i, c1 in enumerate(coords):
me = source_files[I[i]]
# If the left neighbor has already been found...
if neighbors[me][sides[0]] != '':
continue
# could try coords[i:] (+ fixes) for speed if it becomes a problem
for j, c2 in enumerate(coords):
if f(c1, c2):
# then the tiles are neighbors
neigh = source_files[I[j]]
neighbors[me][sides[0]] = neigh
neighbors[neigh][sides[1]] = me
break
return neighbors |
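A hypothetical illustration of the calling convention, where f tests whether tile c2 sits immediately to the right of c1 (file names and coordinates are made up, using the [LLC.lat, LLC.lon, URC.lat, URC.lon] layout of parse_fn):
import numpy as np

source_files = ['N45W073_N46W072.tif', 'N45W072_N46W071.tif']
coords = [[45, -73, 46, -72], [45, -72, 46, -71]]
I = np.arange(len(source_files))  # trivial sort index
neighbors = {fn: {'left': '', 'right': ''} for fn in source_files}

def f(c1, c2):
    # same latitude band and c2's west edge touches c1's east edge
    return c1 != c2 and c1[0] == c2[0] and c2[1] == c1[3]

neighbors = find_neighbors(neighbors, coords, I, source_files,
                           f, ['right', 'left'])
print(neighbors[source_files[0]]['right'])  # N45W072_N46W071.tif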
<SYSTEM_TASK:>
From the elevation filename, we can figure out and load the data and
<END_TASK>
<USER_TASK:>
Description:
def set_neighbor_data(self, elev_fn, dem_proc, interp=None):
"""
From the elevation filename, we can figure out and load the data and
done arrays.
""" |
if interp is None:
interp = self.build_interpolator(dem_proc)
opp = {'top': 'bottom', 'left': 'right'}
for key in self.neighbors[elev_fn].keys():
tile = self.neighbors[elev_fn][key]
if tile == '':
continue
oppkey = key
for me, neigh in opp.items():
if me in key:
oppkey = oppkey.replace(me, neigh)
else:
oppkey = oppkey.replace(neigh, me)
opp_edge = self.neighbors[tile][oppkey]
if opp_edge == '':
continue
interp.values = dem_proc.uca[::-1, :]
# interp.values[:, 0] = np.ravel(dem_proc.uca) # for other interp.
# for the top-left tile we have to set the bottom and right edges
# of that tile, so two edges for those tiles
for key_ed in oppkey.split('-'):
self.edges[tile][key_ed].set_data('data', interp)
interp.values = dem_proc.edge_done[::-1, :].astype(float)
# interp.values[:, 0] = np.ravel(dem_proc.edge_done)
for key_ed in oppkey.split('-'):
self.edges[tile][key_ed].set_data('done', interp) |
<SYSTEM_TASK:>
Update the edge todo data based on the elevation filename
<END_TASK>
<USER_TASK:>
Description:
def update_edge_todo(self, elev_fn, dem_proc):
"""
Update the edge todo data based on the elevation filename
""" |
for key in self.edges[elev_fn].keys():
self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo) |
<SYSTEM_TASK:>
After finishing a calculation, this will update the neighbors and the
<END_TASK>
<USER_TASK:>
Description:
def update_edges(self, elev_fn, dem_proc):
"""
After finishing a calculation, this will update the neighbors and the
todo for that tile
""" |
interp = self.build_interpolator(dem_proc)
self.update_edge_todo(elev_fn, dem_proc)
self.set_neighbor_data(elev_fn, dem_proc, interp) |
<SYSTEM_TASK:>
Creates the initialization data from the edge structure
<END_TASK>
<USER_TASK:>
Description:
def get_edge_init_data(self, fn, save_path=None):
"""
Creates the initialization data from the edge structure
""" |
edge_init_data = {key: self.edges[fn][key].get('data') for key in
self.edges[fn].keys()}
edge_init_done = {key: self.edges[fn][key].get('done') for key in
self.edges[fn].keys()}
edge_init_todo = {key: self.edges[fn][key].get('todo') for key in
self.edges[fn].keys()}
return edge_init_data, edge_init_done, edge_init_todo |
<SYSTEM_TASK:>
Heuristically determines which tile should be recalculated based on
<END_TASK>
<USER_TASK:>
Description:
def find_best_candidate(self, elev_source_files=None):
"""
Heuristically determines which tile should be recalculated based on
updated edge information. Presently does not check if that tile is
locked, which could lead to a parallel thread closing while one thread
continues to process tiles.
""" |
self.fill_percent_done()
keys = list(self.percent_done.keys())
values = list(self.percent_done.values())
i_b = np.argmax(values)
if values[i_b] <= 0:
return None
# check for ties
I = np.array(values) == values[i_b]
if I.sum() == 1:
pass # no ties
else:
I2 = np.argmax(np.array(list(self.max_elev.values()))[I])
i_b = I.nonzero()[0][I2]
# Make sure the apples are still apples
assert(np.array(list(self.max_elev.keys()))[I][I2]
== np.array(keys)[I][I2])
if elev_source_files is not None:
fn = keys[i_b]
lckfn = _get_lockfile_name(fn)
if os.path.exists(lckfn): # another process is working on it
# Find a different candidate
i_alt = np.argsort(values)[::-1]
for i in i_alt:
fn = keys[i]
lckfn = _get_lockfile_name(fn)
if not os.path.exists(lckfn):
break
# Get and return the index
i_b = elev_source_files.index(fn)
return i_b |
<SYSTEM_TASK:>
Processes the hillshading
<END_TASK>
<USER_TASK:>
Description:
def process_command(self, command, save_name='custom', index=None):
"""
Processes the hillshading
Parameters
-----------
index : int/slice (optional)
Default: None - process all tiles in source directory. Otherwise,
will only process the index/indices of the files as listed in
self.elev_source_files
""" |
if index is not None:
elev_source_files = [self.elev_source_files[index]]
else:
elev_source_files = self.elev_source_files
save_root = os.path.join(self.save_path, save_name)
if not os.path.exists(save_root):
os.makedirs(save_root)
for i, esfile in enumerate(elev_source_files):
try:
status = 'Success' # optimism
# Check if file is locked
lckfn = _get_lockfile_name(esfile)
coords = parse_fn(esfile)
fn = get_fn_from_coords(coords, save_name)
fn = os.path.join(save_root, fn)
if os.path.exists(lckfn): # another process is working on it
print(fn, 'is locked')
status = 'locked'
elif os.path.exists(fn):
print(fn, 'already exists')
status = 'cached'
else: # lock this tile
print(fn, '... calculating', save_name)
fid = open(lckfn, 'w')
fid.close()
# Calculate the custom process for this tile
status = command(esfile, fn)
os.remove(lckfn)
if index is None:
self.custom_status[i] = status
else:
self.custom_status[index] = status
except Exception:
lckfn = _get_lockfile_name(esfile)
try:
os.remove(lckfn)
except OSError:
pass
traceback.print_exc()
if index is None:
self.custom_status[i] = "Error " + traceback.format_exc()
else:
self.custom_status[index] = "Error " + traceback.format_exc() |
<SYSTEM_TASK:>
Given a list of file paths for elevation files, this function will rename
<END_TASK>
<USER_TASK:>
Description:
def rename_files(files, name=None):
"""
Given a list of file paths for elevation files, this function will rename
those files to the format required by the pyDEM package.
This assumes a .tif extension.
Parameters
-----------
files : list
A list of strings of the paths to the elevation files that will be
renamed
name : str (optional)
Default = None. A suffix to the filename. For example
<filename>_suffix.tif
Notes
------
The files are renamed in the same directory as the original file locations
""" |
for fil in files:
elev_file = GdalReader(file_name=fil)
elev, = elev_file.raster_layers
fn = get_fn(elev, name)
del elev_file
del elev
fn = os.path.join(os.path.split(fil)[0], fn)
os.rename(fil, fn)
print "Renamed", fil, "to", fn |
<SYSTEM_TASK:>
This parses the file name and returns the coordinates of the tile
<END_TASK>
<USER_TASK:>
Description:
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
""" |
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
.split('_')[:2]
coords = [float(crds)
for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
except Exception:
coords = [np.nan] * 4
return coords |
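Doctest-style example; note that the hemisphere letters are used only as delimiters, so the returned values are unsigned magnitudes:
>>> parse_fn('N45o5W073o0_N46o0W072o5_twi.tif')
[45.5, 73.0, 46.0, 72.5]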
<SYSTEM_TASK:>
Determines the standard filename for a given GeoTIFF Layer.
<END_TASK>
<USER_TASK:>
Description:
def get_fn(elev, name=None):
"""
Determines the standard filename for a given GeoTIFF Layer.
Parameters
-----------
elev : GdalReader.raster_layer
A raster layer from the GdalReader object.
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
""" |
gcs = elev.grid_coordinates
coords = [gcs.LLC.lat, gcs.LLC.lon, gcs.URC.lat, gcs.URC.lon]
return get_fn_from_coords(coords, name) |
<SYSTEM_TASK:>
Given a set of coordinates, returns the standard filename.
<END_TASK>
<USER_TASK:>
Description:
def get_fn_from_coords(coords, name=None):
""" Given a set of coordinates, returns the standard filename.
Parameters
-----------
coords : list
[LLC.lat, LLC.lon, URC.lat, URC.lon]
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
""" |
NS1 = ["S", "N"][coords[0] > 0]
EW1 = ["W", "E"][coords[1] > 0]
NS2 = ["S", "N"][coords[2] > 0]
EW2 = ["W", "E"][coords[3] > 0]
new_name = "%s%0.3g%s%0.3g_%s%0.3g%s%0.3g" % \
(NS1, coords[0], EW1, coords[1], NS2, coords[2], EW2, coords[3])
if name is not None:
new_name += '_' + name
return new_name.replace('.', 'o') + '.tif' |
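Doctest-style example; the hemisphere letter is chosen from the sign of each coordinate, but the sign itself is not stripped from the formatted number:
>>> get_fn_from_coords([45.5, -73.0, 46.0, -72.5], 'twi')
'N45o5W-73_N46W-72o5_twi.tif'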
<SYSTEM_TASK:>
Extracts the change in x and y coordinates from the geotiff file. Presently
<END_TASK>
<USER_TASK:>
Description:
def mk_dx_dy_from_geotif_layer(geotif):
"""
Extracts the change in x and y coordinates from the geotiff file. Presently
only supports WGS-84 files.
""" |
ELLIPSOID_MAP = {'WGS84': 'WGS-84'}
ellipsoid = ELLIPSOID_MAP[geotif.grid_coordinates.wkt]
d = distance(ellipsoid=ellipsoid)
dx = geotif.grid_coordinates.x_axis
dy = geotif.grid_coordinates.y_axis
dX = np.zeros((dy.shape[0]-1))
for j in range(len(dX)):
dX[j] = d.measure((dy[j+1], dx[1]), (dy[j+1], dx[0])) * 1000 # km2m
dY = np.zeros((dy.shape[0]-1))
for i in range(len(dY)):
dY[i] = d.measure((dy[i], 0), (dy[i+1], 0)) * 1000 # km2m
return dX, dY |
<SYSTEM_TASK:>
Creates a new geotiff file objects using the WGS84 coordinate system, saves
<END_TASK>
<USER_TASK:>
Description:
def mk_geotiff_obj(raster, fn, bands=1, gdal_data_type=gdal.GDT_Float32,
lat=[46, 45], lon=[-73, -72]):
"""
Creates a new geotiff file objects using the WGS84 coordinate system, saves
it to disk, and returns a handle to the python file object and driver
Parameters
------------
raster : array
Numpy array of the raster data to be added to the object
fn : str
Name of the geotiff file
bands : int (optional)
Default 1. See :py:func:`gdal.GetDriverByName('GTiff').Create`
gdal_data_type : gdal.GDT_<type> (optional)
Gdal data type (see gdal.GDT_...)
lat : list
[northern lat, southern lat]
lon : list
[western lon, eastern lon]
""" |
NNi, NNj = raster.shape
driver = gdal.GetDriverByName('GTiff')
obj = driver.Create(fn, NNj, NNi, bands, gdal_data_type)
pixel_height = -np.abs(lat[0] - lat[1]) / (NNi - 1.0)
pixel_width = np.abs(lon[0] - lon[1]) / (NNj - 1.0)
obj.SetGeoTransform([lon[0], pixel_width, 0, lat[0], 0, pixel_height])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
obj.SetProjection(srs.ExportToWkt())
obj.GetRasterBand(1).WriteArray(raster)
return obj, driver |
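A small usage sketch (the file name and array are invented; the lat/lon bounds fall back to the defaults above):
import numpy as np

raster = np.random.rand(100, 120).astype('float32')
obj, driver = mk_geotiff_obj(raster, 'toy.tif')
obj.FlushCache()  # push the band data to disk
obj = None        # releasing the handle finalizes the file in GDAL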
<SYSTEM_TASK:>
Sorts array "a" by columns i
<END_TASK>
<USER_TASK:>
Description:
def sortrows(a, i=0, index_out=False, recurse=True):
""" Sorts array "a" by columns i
Parameters
------------
a : np.ndarray
array to be sorted
i : int (optional)
column to be sorted by, taken as 0 by default
index_out : bool (optional)
return the index I such that a[I, :] = sortrows(a, i). Default = False
recurse : bool (optional)
recursively sort by each of the columns. i.e.
once column i is sort, we sort the smallest column number
etc. True by default.
Returns
--------
a : np.ndarray
The array 'a' sorted in ascending order by column i
I : np.ndarray (optional)
The index such that a[I, :] = sortrows(a, i). Only returned if
index_out = True
Examples
---------
>>> a = array([[1,2],[3,1],[2,3]])
>>> b = sortrows(a,0)
>>> b
array([[1, 2],
[2, 3],
[3, 1]])
>>> c, I = sortrows(a,1,True)
>>> c
array([[3, 1],
[1, 2],
[2, 3]])
>>> I
array([1, 0, 2])
>>> a[I,:] - c
array([[0, 0],
[0, 0],
[0, 0]])
""" |
I = np.argsort(a[:, i])
a = a[I, :]
# We recursively call sortrows to make sure it is sorted best by every
# column
if recurse and (len(a[0]) > i + 1):
for b in np.unique(a[:, i]):
ids = a[:, i] == b
colids = list(range(i)) + list(range(i + 1, len(a[0])))
a[np.ix_(ids, colids)], I2 = sortrows(a[np.ix_(ids, colids)],
0, True, True)
I[ids] = I[np.nonzero(ids)[0][I2]]
if index_out:
return a, I
else:
return a |
<SYSTEM_TASK:>
Get flattened indices for the border of the region I.
<END_TASK>
<USER_TASK:>
Description:
def get_border_index(I, shape, size):
"""
Get flattened indices for the border of the region I.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region.
shape : tuple(int, int)
region shape
size : int
region size (technically computable from shape argument)
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally bordering I
""" |
J = get_adjacent_index(I, shape, size)
# instead of setdiff?
# border = np.zeros(size)
# border[J] = 1
# border[I] = 0
# J, = np.where(border)
return np.setdiff1d(J, I) |
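For instance, on a 4x4 grid the returned indices form the ring of pixels around a two-pixel region (a sketch assuming get_adjacent_index behaves as used above, i.e. returns orthogonal and diagonal neighbours):
import numpy as np

shape = (4, 4)
I = np.array([5, 6])  # pixels (1, 1) and (1, 2) in flattened order
J = get_border_index(I, shape, 16)
# J covers rows 0-2, columns 0-3 of the grid, minus the region itself
print(np.sort(J))  # [ 0  1  2  3  4  7  8  9 10 11]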
<SYSTEM_TASK:>
Get border of the region as a boolean array mask.
<END_TASK>
<USER_TASK:>
Description:
def get_border_mask(region):
"""
Get border of the region as a boolean array mask.
Parameters
----------
region : np.ndarray(shape=(m, n), dtype=bool)
mask of the region
Returns
-------
border : np.ndarray(shape=(m, n), dtype=bool)
mask of the region border (not including region)
""" |
# common special case (for efficiency)
internal = region[1:-1, 1:-1]
if internal.all() and internal.any():
return ~region
I, = np.where(region.ravel())
J = get_adjacent_index(I, region.shape, region.size)
border = np.zeros(region.size, dtype='bool')
border[J] = 1
border[I] = 0
border = border.reshape(region.shape)
return border |
<SYSTEM_TASK:>
Compute within-region distances from the src pixels.
<END_TASK>
<USER_TASK:>
Description:
def get_distance(region, src):
"""
Compute within-region distances from the src pixels.
Parameters
----------
region : np.ndarray(shape=(m, n), dtype=bool)
mask of the region
src : np.ndarray(shape=(m, n), dtype=bool)
mask of the source pixels to compute distances from.
Returns
-------
d : np.ndarray(shape=(m, n), dtype=float)
approximate within-region distance from the nearest src pixel;
(distances outside of the region are arbitrary).
""" |
dmax = float(region.size)
d = np.full(region.shape, dmax)
d[src] = 0
for n in range(region.size):
d_orth = minimum_filter(d, footprint=_ORTH2) + 1
d_diag = minimum_filter(d, (3, 3)) + _SQRT2
d_adj = np.minimum(d_orth[region], d_diag[region])
d[region] = np.minimum(d_adj, d[region])
if (d[region] < dmax).all():
break
return d |
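A small check on a fully-unmasked region, measuring distance from the left column (this relies on the module-level _ORTH2 and _SQRT2 constants used above):
import numpy as np

region = np.ones((3, 5), dtype=bool)
src = np.zeros((3, 5), dtype=bool)
src[:, 0] = True
d = get_distance(region, src)
print(d[1])  # [0. 1. 2. 3. 4.]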
<SYSTEM_TASK:>
Grow a slice object by 1 in each direction without overreaching the list.
<END_TASK>
<USER_TASK:>
Description:
def grow_slice(slc, size):
"""
Grow a slice object by 1 in each direction without overreaching the list.
Parameters
----------
slc: slice
slice object to grow
size: int
list length
Returns
-------
slc: slice
extended slice
""" |
return slice(max(0, slc.start-1), min(size, slc.stop+1)) |
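Doctest-style examples, including clamping at both ends:
>>> grow_slice(slice(3, 5), 10)
slice(2, 6, None)
>>> grow_slice(slice(0, 10), 10)
slice(0, 10, None)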
<SYSTEM_TASK:>
Check if a 2d object is on the edge of the array.
<END_TASK>
<USER_TASK:>
Description:
def is_edge(obj, shape):
"""
Check if a 2d object is on the edge of the array.
Parameters
----------
obj : tuple(slice, slice)
Pair of slices (e.g. from scipy.ndimage.measurements.find_objects)
shape : tuple(int, int)
Array shape.
Returns
-------
b : boolean
True if the object touches any edge of the array, else False.
""" |
if obj[0].start == 0: return True
if obj[1].start == 0: return True
if obj[0].stop == shape[0]: return True
if obj[1].stop == shape[1]: return True
return False |
<SYSTEM_TASK:>
Pops a chunk of the given max size.
<END_TASK>
<USER_TASK:>
Description:
def pop_chunk(self, chunk_max_size):
"""Pops a chunk of the given max size.
Optimized to avoid too much string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
""" |
if self._total_length < chunk_max_size:
# fastpath (the whole queue fit in a single chunk)
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
# first iteration
if data_length == chunk_max_size:
# we are lucky !
return data
elif data_length > chunk_max_size:
# we have enough data at first iteration
# => fast path optimization
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
# no single iteration fast path optimization :-(
# let's use a WriteBuffer to build the result chunk
chunk_write_buffer = WriteBuffer()
else:
# not first iteration
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
limit = chunk_max_size - \
chunk_write_buffer._total_length - data_length
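# limit is negative here, so view[:limit] keeps just enough bytes
# to fill the chunk and view[limit:] re-queues the remainder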
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
# the buffer is empty (so no memoryview inside)
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes() |
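A hypothetical usage sketch, assuming the surrounding WriteBuffer class exposes the append() used internally above:
buf = WriteBuffer()
buf.append(b'hello ')
buf.append(b'world')
print(buf.pop_chunk(8))  # b'hello wo'
print(buf.pop_chunk(8))  # b'rld' (fast path: whole queue fits)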
<SYSTEM_TASK:>
Return an absolute version of this path. This function works
<END_TASK>
<USER_TASK:>
Description:
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
""" |
# XXX untested yet!
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj |
<SYSTEM_TASK:>
Whether this path is a symbolic link.
<END_TASK>
<USER_TASK:>
Description:
def is_symlink(self):
"""
Whether this path is a symbolic link.
""" |
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist
return False |
<SYSTEM_TASK:>
Whether this path is a block device.
<END_TASK>
<USER_TASK:>
Description:
def is_block_device(self):
"""
Whether this path is a block device.
""" |
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False |
<SYSTEM_TASK:>
Whether this path is a character device.
<END_TASK>
<USER_TASK:>
Description:
def is_char_device(self):
"""
Whether this path is a character device.
""" |
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False |
<SYSTEM_TASK:>
returns True if the GC's overlap.
<END_TASK>
<USER_TASK:>
Description:
def intersects(self, other_grid_coordinates):
""" returns True if the GC's overlap. """ |
ogc = other_grid_coordinates # alias
# for explanation: http://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
# Note the flipped y-coord in this coord system.
ax1, ay1, ax2, ay2 = self.ULC.lon, self.ULC.lat, self.LRC.lon, self.LRC.lat
bx1, by1, bx2, by2 = ogc.ULC.lon, ogc.ULC.lat, ogc.LRC.lon, ogc.LRC.lat
return (ax1 <= bx2) and (ax2 >= bx1) and (ay1 >= by2) and (ay2 <= by1) |
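The test is the classic axis-aligned rectangle overlap, with the y comparisons flipped because latitude decreases downward; a standalone version of the same predicate for sanity checking (corner tuples are made up):
def overlap(a, b):
    # rectangles given as (ULC.lon, ULC.lat, LRC.lon, LRC.lat)
    ax1, ay1, ax2, ay2 = a
    bx1, by1, bx2, by2 = b
    return (ax1 <= bx2) and (ax2 >= bx1) and (ay1 >= by2) and (ay2 <= by1)

print(overlap((-73, 46, -72, 45), (-72.5, 46.5, -71.5, 45.5)))  # True
print(overlap((-73, 46, -72, 45), (-71.9, 46.5, -71.0, 45.5)))  # False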
<SYSTEM_TASK:>
Use pixel centers when appropriate.
<END_TASK>
<USER_TASK:>
Description:
def raster_to_projection_coords(self, pixel_x, pixel_y):
""" Use pixel centers when appropriate.
See documentation for the GDAL function GetGeoTransform for details. """ |
h_px_py = np.array([1, pixel_x, pixel_y])
gt = np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]])
arr = np.inner(gt, h_px_py)
return arr[2], arr[1] |
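The inner product implements the usual GDAL affine transform; a numeric check with an assumed geotransform (0.01-degree pixels anchored at lon -73, lat 46):
import numpy as np

gt = [-73.0, 0.01, 0.0, 46.0, 0.0, -0.01]
aff = np.array([[1, 0, 0], gt[0:3], gt[3:6]])
arr = np.inner(aff, np.array([1, 10, 20]))  # pixel_x=10, pixel_y=20
print(arr[2], arr[1])  # ~45.8 ~-72.9 (lat, lon)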
<SYSTEM_TASK:>
Returns pixel centers.
<END_TASK>
<USER_TASK:>
Description:
def projection_to_raster_coords(self, lat, lon):
""" Returns pixel centers.
See documentation for the GDAL function GetGeoTransform for details. """ |
r_px_py = np.array([1, lon, lat])
tg = inv(np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]]))
return np.inner(tg, r_px_py)[1:] |
<SYSTEM_TASK:>
Reprojects data in this layer to match that in the GridCoordinates
<END_TASK>
<USER_TASK:>
Description:
def reproject_to_grid_coordinates(self, grid_coordinates, interp=gdalconst.GRA_NearestNeighbour):
""" Reprojects data in this layer to match that in the GridCoordinates
object. """ |
source_dataset = self.grid_coordinates._as_gdal_dataset()
dest_dataset = grid_coordinates._as_gdal_dataset()
rb = source_dataset.GetRasterBand(1)
rb.SetNoDataValue(NO_DATA_VALUE)
rb.WriteArray(np.ma.filled(self.raster_data, NO_DATA_VALUE))
gdal.ReprojectImage(source_dataset, dest_dataset,
source_dataset.GetProjection(),
dest_dataset.GetProjection(),
interp)
dest_layer = self.clone_traits()
dest_layer.grid_coordinates = grid_coordinates
rb = dest_dataset.GetRasterBand(1)
dest_layer.raster_data = np.ma.masked_values(rb.ReadAsArray(), NO_DATA_VALUE)
return dest_layer |
<SYSTEM_TASK:>
Replace masked-out elements in an array using an iterative image inpainting algorithm.
<END_TASK>
<USER_TASK:>
Description:
def inpaint(self):
""" Replace masked-out elements in an array using an iterative image inpainting algorithm. """ |
import inpaint
filled = inpaint.replace_nans(np.ma.filled(self.raster_data, np.NAN).astype(np.float32), 3, 0.01, 2)
self.raster_data = np.ma.masked_invalid(filled) |
<SYSTEM_TASK:>
Gets a connected Client object.
<END_TASK>
<USER_TASK:>
Description:
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
""" |
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client) |
<SYSTEM_TASK:>
Returns a ContextManagerFuture to be yielded in a with statement.
<END_TASK>
<USER_TASK:>
Description:
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
""" |
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb) |
<SYSTEM_TASK:>
Releases a client object to the pool.
<END_TASK>
<USER_TASK:>
Description:
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
""" |
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release() |
<SYSTEM_TASK:>
Disconnects all pooled client objects.
<END_TASK>
<USER_TASK:>
Description:
def destroy(self):
"""Disconnects all pooled client objects.""" |
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break |
<SYSTEM_TASK:>
Selects the parameters of a config section schema.
<END_TASK>
<USER_TASK:>
Description:
def select_params_from_section_schema(section_schema, param_class=Param,
deep=False):
"""Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:param param_class: Parameter class used to recognize parameters.
:param deep: If True, recurse into nested section schema classes.
:return: Generator of (name, param) tuples
""" |
# pylint: disable=invalid-name
for name, value in inspect.getmembers(section_schema):
if name.startswith("__") or value is None:
continue # pragma: no cover
elif inspect.isclass(value) and deep:
# -- CASE: class => SELF-CALL (recursively).
# pylint: disable= bad-continuation
cls = value
for name, value in select_params_from_section_schema(cls,
param_class=param_class, deep=True):
yield (name, value)
elif isinstance(value, param_class):
yield (name, value) |
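A hedged usage sketch, assuming Param can be instantiated with a type keyword as in click-configfile's schema classes:
class ServerSchema(object):
    host = Param(type=str)
    port = Param(type=int)

for name, param in select_params_from_section_schema(ServerSchema):
    print(name)  # 'host', then 'port' (inspect.getmembers sorts by name)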