def find_best_candidate(self, elev_source_files=None):
"""
Heuristically determines which tile should be recalculated based on
updated edge information. Presently does not check if that tile is
locked, which could lead to a parallel thread closing while one thread
continues to process tiles.
"""
self.fill_percent_done()
i_b = np.argmax(self.percent_done.values())
if self.percent_done.values()[i_b] <= 0:
return None
# check for ties
I = np.array(self.percent_done.values()) == \
self.percent_done.values()[i_b]
if I.sum() == 1:
pass # no ties
else:
I2 = np.argmax(np.array(self.max_elev.values())[I])
i_b = I.nonzero()[0][I2]
# Make sure the apples are still apples
assert(np.array(self.max_elev.keys())[I][I2]
== np.array(self.percent_done.keys())[I][I2])
if elev_source_files is not None:
fn = self.percent_done.keys()[i_b]
lckfn = _get_lockfile_name(fn)
if os.path.exists(lckfn): # another process is working on it
# Find a different Candidate
i_alt = np.argsort(self.percent_done.values())[::-1]
for i in i_alt:
fn = self.percent_done.keys()[i]
lckfn = _get_lockfile_name(fn)
if not os.path.exists(lckfn):
break
# Get and return the index
i_b = elev_source_files.index(fn)
return i_b
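# A minimal sketch of the tie-breaking rule above, on two hypothetical dicts
# keyed by tile filename. A shared, sorted key list sidesteps the dict-ordering
# question that the assert in find_best_candidate guards against.
import numpy as np

percent_done = {'a.tif': 80.0, 'b.tif': 80.0, 'c.tif': 10.0}
max_elev = {'a.tif': 120.0, 'b.tif': 340.0, 'c.tif': 90.0}

keys = sorted(percent_done)
done = np.array([percent_done[k] for k in keys])
elev = np.array([max_elev[k] for k in keys])
i_b = np.argmax(done)            # first best candidate
ties = done == done[i_b]         # boolean mask of tied tiles
if ties.sum() > 1:
    # break ties by preferring the tile with the highest elevation
    i_b = ties.nonzero()[0][np.argmax(elev[ties])]
print(keys[i_b])                 # b.tif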
def process_twi(self, index=None, do_edges=False, skip_uca_twi=False):
"""
Processes the TWI, along with any dependencies (like the slope and UCA)
Parameters
-----------
index : int/slice (optional)
Default: None - process all tiles in source directory. Otherwise,
will only process the index/indices of the files as listed in
self.elev_source_files
do_edges : bool (optional)
Default False. When false, the UCA will be calculated with
available edge information if the UCA was not previously computed.
If the UCA was previously computed and do_edges == False, the UCA
will not be updated. If do_edges == True, the UCA will also be
recalculated.
skip_uca_twi : bool (optional)
Skips the calculation of the UCA and TWI (only calculates the
magnitude and direction)
Notes
------
do_edges = False for the first round of the processing, but it is True
for the second round.
"""
if index is not None:
elev_source_files = [self.elev_source_files[index]]
else:
elev_source_files = self.elev_source_files
for i, esfile in enumerate(elev_source_files):
try:
fn, status = self.calculate_twi(esfile,
save_path=self.save_path,
do_edges=do_edges,
skip_uca_twi=skip_uca_twi)
if index is None:
self.twi_status[i] = status
else:
self.twi_status[index] = status
except:
lckfn = _get_lockfile_name(esfile)
try:
os.remove(lckfn)
except:
pass
traceback.print_exc()
print traceback.format_exc()
if index is None:
self.twi_status[i] = "Error " + traceback.format_exc()
else:
self.twi_status[index] = "Error " + traceback.format_exc()
def process(self, index=None):
"""
This will completely process a directory of elevation tiles (as
supplied in the constructor). Both phases of the calculation, the
single tile and edge resolution phases are run.
Parameters
-----------
index : int/slice (optional)
Default None - processes all tiles in a directory. See
:py:func:`process_twi` for additional options.
"""
# Round 0 of twi processing, process the magnitude and directions of
# slopes
print "Starting slope calculation round"
self.process_twi(index, do_edges=False, skip_uca_twi=True)
# Round 1 of twi processing
print "Starting self-area calculation round"
self.process_twi(index, do_edges=False)
# Round 2 of twi processing: edge resolution
i = self.tile_edge.find_best_candidate(self.elev_source_files)
print "Starting edge resolution round: ",
count = 0
i_old = -1
same_count = 0
while i is not None and same_count < 3:
count += 1
print '*' * 10
print count, '(%d --> %d)' % (i_old, i)
# %%
self.process_twi(i, do_edges=True)
i_old = i
i = self.tile_edge.find_best_candidate(self.elev_source_files)
if i_old == i:
same_count += 1
else:
same_count = 0
print '*'*79
print '******* PROCESSING COMPLETED *******'
print '*'*79
return self
def calculate_twi(self, esfile, save_path, use_cache=True, do_edges=False,
skip_uca_twi=False):
"""
Calculates twi for supplied elevation file
Parameters
-----------
esfile : str
Path to elevation file to be processed
save_path: str
Root path to location where TWI will be saved. TWI will be saved in
a subdirectory 'twi'.
use_cache : bool (optional)
Default True. If a temporary file exists (from a previous run),
the cached file will be used. Otherwise, if False, existing files
will be recomputed
do_edges : bool (optional)
See :py:func:`process_twi` for details on this argument.
skip_uca_twi : bool (optional)
Skips the calculation of the UCA and TWI (only calculates the
magnitude and direction)
"""
if os.path.exists(os.path.join(save_path, 'tile_edge.pkl')) and \
self.tile_edge is None:
with open(os.path.join(save_path, 'tile_edge.pkl'), 'r') as fid:
self.tile_edge = cPickle.load(fid)
elif self.tile_edge is None:
self.tile_edge = TileEdgeFile(self.elev_source_files, save_path)
with open(os.path.join(save_path, 'tile_edge.pkl'), 'wb') as fid:
cPickle.dump(self.tile_edge, fid)
status = 'Success' # optimism
# Check if file is locked
lckfn = _get_lockfile_name(esfile)
coords = parse_fn(esfile)
fn = get_fn_from_coords(coords, 'twi')
print '*'*79
if skip_uca_twi:
print '*'*10, fn, 'Slope Calculation starting...:', '*'*10
else:
print '*'*10, fn, 'TWI Calculation starting...:', '*'*10
print '*'*79
if os.path.exists(lckfn): # another process is working on it
print fn, 'is locked'
return fn, "Locked"
else: # lock this tile
fid = file(lckfn, 'w')
fid.close()
dem_proc = DEMProcessor(esfile)
# check if the slope already exists for the file. If yes, we should
# move on to the next tile without doing anything else
if skip_uca_twi \
and os.path.exists(dem_proc.get_full_fn('mag', save_path)
+ '.npz') \
and os.path.exists(dem_proc.get_full_fn('ang', save_path)
+ '.npz'):
print dem_proc.get_full_fn('mag', save_path) + '.npz', 'already exists'
print dem_proc.get_full_fn('ang', save_path) + '.npz', 'already exists'
# remove lock file
os.remove(lckfn)
return fn, 'Cached: Slope'
# check if the twi already exists for the file. If not in the edge
# resolution round, we should move on to the next tile
if os.path.exists(dem_proc.get_full_fn('twi', save_path)) \
and (do_edges is False):
print dem_proc.get_full_fn('twi', save_path), 'already exists'
# remove lock file
os.remove(lckfn)
return fn, 'Cached'
# only calculate the slopes and direction if they do not exist in cache
fn_ang = dem_proc.get_full_fn('ang', save_path)
fn_mag = dem_proc.get_full_fn('mag', save_path)
if os.path.exists(fn_ang + '.npz') and os.path.exists(fn_mag + '.npz')\
and not self.overwrite_cache:
dem_proc.load_direction(fn_ang)
dem_proc.load_slope(fn_mag)
dem_proc.find_flats()
else:
if os.path.exists(fn_ang + '.npz') and os.path.exists(fn_mag + '.npz')\
and self.overwrite_cache:
os.remove(fn_ang)
os.remove(fn_mag)
dem_proc.calc_slopes_directions()
dem_proc.save_slope(save_path, raw=True)
dem_proc.save_direction(save_path, raw=True)
if self._DEBUG:
dem_proc.save_slope(save_path, as_int=False)
dem_proc.save_direction(save_path, as_int=False)
if skip_uca_twi:
# remove lock file
os.remove(lckfn)
return fn, status + ":mag-dir-only"
fn_uca = dem_proc.get_full_fn('uca', save_path)
fn_uca_ec = dem_proc.get_full_fn('uca_edge_corrected', save_path)
fn_twi = dem_proc.get_full_fn('twi', save_path)
# check if edge structure exists for this tile and initialize
edge_init_data, edge_init_done, edge_init_todo = \
self.tile_edge.get_edge_init_data(esfile, save_path)
# Check if uca data exists (if yes, we are in the
# edge-resolution round)
uca_init = None
if os.path.exists(fn_uca + '.npz'):
if os.path.exists(fn_uca_ec + '.npz'):
dem_proc.load_uca(fn_uca_ec)
else:
dem_proc.load_uca(fn_uca)
uca_init = dem_proc.uca
if do_edges or uca_init is None:
dem_proc.calc_uca(uca_init=uca_init,
edge_init_data=[edge_init_data, edge_init_done,
edge_init_todo])
if uca_init is None:
dem_proc.save_uca(save_path, raw=True)
if self._DEBUG:
# Also save a geotiff for debugging
dem_proc.save_uca(save_path, as_int=False)
else:
if os.path.exists(fn_uca_ec):
os.remove(fn_uca_ec)
dem_proc.save_array(dem_proc.uca, None, 'uca_edge_corrected',
save_path, raw=True)
if self._DEBUG:
dem_proc.save_array(dem_proc.uca, None, 'uca_edge_corrected',
save_path, as_int=False)
# Saving Edge Data, and updating edges
self.tile_edge.update_edges(esfile, dem_proc)
dem_proc.calc_twi()
if os.path.exists(fn_twi):
os.remove(fn_twi)
dem_proc.save_twi(save_path, raw=False)
# clean up memory, just in case
gc.collect()
# remove lock file
os.remove(lckfn)
# Save last-used dem_proc for debugging purposes
if self._DEBUG:
self.dem_proc = dem_proc
return fn, status
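# The tile locking in calculate_twi is a plain lock-file convention. A minimal
# sketch of the same pattern (a hypothetical lock-name helper is stubbed in;
# the real module provides _get_lockfile_name):
import os

def _get_lockfile_name(esfile):
    return esfile + '.lck'

esfile = 'N45W073_N46W072.tif'   # hypothetical tile name
lckfn = _get_lockfile_name(esfile)
if os.path.exists(lckfn):
    print(lckfn + ' is locked by another process')
else:
    open(lckfn, 'w').close()     # claim the tile
    try:
        pass                     # ... process the tile ...
    finally:
        os.remove(lckfn)         # always release the lock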
def process_command(self, command, save_name='custom', index=None):
"""
Processes a custom command (e.g. hillshading) on each tile.
Parameters
-----------
command : callable
Called as command(esfile, fn); processes the source elevation file
esfile, writes its output to fn, and returns a status string.
save_name : str (optional)
Subdirectory name (under save_path) and filename suffix for the
outputs. Default 'custom'.
index : int/slice (optional)
Default: None - process all tiles in source directory. Otherwise,
will only process the index/indices of the files as listed in
self.elev_source_files
"""
if index is not None:
elev_source_files = [self.elev_source_files[index]]
else:
elev_source_files = self.elev_source_files
save_root = os.path.join(self.save_path, save_name)
if not os.path.exists(save_root):
os.makedirs(save_root)
for i, esfile in enumerate(elev_source_files):
try:
status = 'Success' # optimism
# Check if file is locked
lckfn = _get_lockfile_name(esfile)
coords = parse_fn(esfile)
fn = get_fn_from_coords(coords, save_name)
fn = os.path.join(save_root, fn)
if os.path.exists(lckfn): # another process is working on it
print fn, 'is locked'
status = 'locked'
elif os.path.exists(fn):
print fn, 'already exists'
status = 'cached'
else: # lock this tile
print fn, '... calculating ', save_name
fid = file(lckfn, 'w')
fid.close()
# Calculate the custom process for this tile
status = command(esfile, fn)
os.remove(lckfn)
if index is None:
self.custom_status[i] = status
else:
self.custom_status[index] = status
except:
lckfn = _get_lockfile_name(esfile)
try:
os.remove(lckfn)
except:
pass
traceback.print_exc()
print traceback.format_exc()
if index is None:
self.custom_status[i] = "Error " + traceback.format_exc()
else:
self.custom_status[index] = "Error " + traceback.format_exc()
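# Hypothetical usage sketch of process_command: the command callable receives
# the source elevation file and a destination filename, and returns the status
# string that ends up in self.custom_status.
def hillshade_command(esfile, out_fn):
    # ... compute a hillshade from esfile and write it to out_fn ...
    return 'Success'

# pm.process_command(hillshade_command, save_name='hillshade')  # pm: assumed
# instance of the processing manager class these methods belong to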
def rename_files(files, name=None):
"""
Given a list of file paths for elevation files, this function will rename
those files to the format required by the pyDEM package.
This assumes a .tif extension.
Parameters
-----------
files : list
A list of strings of the paths to the elevation files that will be
renamed
name : str (optional)
Default = None. A suffix to the filename. For example
<filename>_suffix.tif
Notes
------
The files are renamed in the same directory as the original file locations
"""
for fil in files:
elev_file = GdalReader(file_name=fil)
elev, = elev_file.raster_layers
fn = get_fn(elev, name)
del elev_file
del elev
fn = os.path.join(os.path.split(fil)[0], fn)
os.rename(fil, fn)
print "Renamed", fil, "to", fn | Given a list of file paths for elevation files, this function will rename
those files to the format required by the pyDEM package.
This assumes a .tif extension.
Parameters
-----------
files : list
A list of strings of the paths to the elevation files that will be
renamed
name : str (optional)
Default = None. A suffix to the filename. For example
<filename>_suffix.tif
Notes
------
The files are renamed in the same directory as the original file locations | entailment |
def parse_fn(fn):
""" This parses the file name and returns the coordinates of the tile
Parameters
-----------
fn : str
Filename of a GEOTIFF
Returns
--------
coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
"""
try:
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
.split('_')[:2]
coords = [float(crds)
for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
except:
coords = [np.nan] * 4
return coords
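# Worked example of the parsing above, assuming the 'o'-for-decimal-point
# filename convention (N45o5 means latitude 45.5). Note the hemisphere letters
# are stripped and no sign is applied to the numbers.
import os
import re

fn = 'N45o5W073_N46W072o5_twi.tif'  # hypothetical tile name
parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.').split('_')[:2]
coords = [float(c) for c in re.split('[NSEW]', parts[0] + parts[1])[1:]]
print(coords)  # [45.5, 73.0, 46.0, 72.5]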
def get_fn(elev, name=None):
"""
Determines the standard filename for a given GeoTIFF Layer.
Parameters
-----------
elev : GdalReader.raster_layer
A raster layer from the GdalReader object.
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
"""
gcs = elev.grid_coordinates
coords = [gcs.LLC.lat, gcs.LLC.lon, gcs.URC.lat, gcs.URC.lon]
return get_fn_from_coords(coords, name)
def get_fn_from_coords(coords, name=None):
""" Given a set of coordinates, returns the standard filename.
Parameters
-----------
coords : list
[LLC.lat, LLC.lon, URC.lat, URC.lon]
name : str (optional)
An optional suffix to the filename.
Returns
-------
fn : str
The standard <filename>_<name>.tif with suffix (if supplied)
"""
NS1 = ["S", "N"][coords[0] > 0]
EW1 = ["W", "E"][coords[1] > 0]
NS2 = ["S", "N"][coords[2] > 0]
EW2 = ["W", "E"][coords[3] > 0]
new_name = "%s%0.3g%s%0.3g_%s%0.3g%s%0.3g" % \
(NS1, coords[0], EW1, coords[1], NS2, coords[2], EW2, coords[3])
if name is not None:
new_name += '_' + name
return new_name.replace('.', 'o') + '.tif'
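# Example: coordinates to standard filename. Decimal points are written as
# 'o' so the result stays filesystem-safe, and parse_fn can invert it.
coords = [45.5, 73.0, 46.0, 72.5]  # [LLC.lat, LLC.lon, URC.lat, URC.lon]
print(get_fn_from_coords(coords, 'twi'))  # N45o5E73_N46E72o5_twi.tif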
def mk_dx_dy_from_geotif_layer(geotif):
"""
Extracts the change in x and y coordinates from the geotiff file. Presently
only supports WGS-84 files.
"""
ELLIPSOID_MAP = {'WGS84': 'WGS-84'}
ellipsoid = ELLIPSOID_MAP[geotif.grid_coordinates.wkt]
d = distance(ellipsoid=ellipsoid)
dx = geotif.grid_coordinates.x_axis
dy = geotif.grid_coordinates.y_axis
dX = np.zeros((dy.shape[0]-1))
for j in xrange(len(dX)):
dX[j] = d.measure((dy[j+1], dx[1]), (dy[j+1], dx[0])) * 1000 # km2m
dY = np.zeros((dy.shape[0]-1))
for i in xrange(len(dY)):
dY[i] = d.measure((dy[i], 0), (dy[i+1], 0)) * 1000 # km2m
return dX, dY
def mk_geotiff_obj(raster, fn, bands=1, gdal_data_type=gdal.GDT_Float32,
lat=[46, 45], lon=[-73, -72]):
"""
Creates a new geotiff file object using the WGS84 coordinate system, saves
it to disk, and returns a handle to the python file object and driver
Parameters
------------
raster : array
Numpy array of the raster data to be added to the object
fn : str
Name of the geotiff file
bands : int (optional)
See :py:func:`gdal.GetDriverByName('GTiff').Create`
gdal_data_type : gdal.GDT_<type>
Gdal data type (see gdal.GDT_...)
lat : list
northern lat, southern lat
lon : list
[western lon, eastern lon]
"""
NNi, NNj = raster.shape
driver = gdal.GetDriverByName('GTiff')
obj = driver.Create(fn, NNj, NNi, bands, gdal_data_type)
pixel_height = -np.abs(lat[0] - lat[1]) / (NNi - 1.0)
pixel_width = np.abs(lon[0] - lon[1]) / (NNj - 1.0)
obj.SetGeoTransform([lon[0], pixel_width, 0, lat[0], 0, pixel_height])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
obj.SetProjection(srs.ExportToWkt())
obj.GetRasterBand(1).WriteArray(raster)
return obj, driver
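# Hypothetical usage: write a small random raster covering the default
# one-degree box. Dropping the reference closes the GDAL dataset.
import numpy as np

raster = np.random.rand(100, 200).astype(np.float32)
obj, driver = mk_geotiff_obj(raster, 'example.tif', lat=[46, 45], lon=[-73, -72])
obj.FlushCache()  # force the write to disk
obj = None        # close the file handle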
def sortrows(a, i=0, index_out=False, recurse=True):
""" Sorts array "a" by columns i
Parameters
------------
a : np.ndarray
array to be sorted
i : int (optional)
column to be sorted by, taken as 0 by default
index_out : bool (optional)
return the index I such that a(I) = sortrows(a,i). Default = False
recurse : bool (optional)
recursively sort by each of the columns. i.e.
once column i is sorted, we recursively sort the remaining
columns. True by default.
Returns
--------
a : np.ndarray
The array 'a' sorted in ascending order by column i
I : np.ndarray (optional)
The index such that a[I, :] = sortrows(a, i). Only return if
index_out = True
Examples
---------
>>> a = array([[1,2],[3,1],[2,3]])
>>> b = sortrows(a,0)
>>> b
array([[1, 2],
[2, 3],
[3, 1]])
>>> c, I = sortrows(a,1,True)
>>> c
array([[3, 1],
[1, 2],
[2, 3]])
>>> I
array([1, 0, 2])
>>> a[I,:] - c
array([[0, 0],
[0, 0],
[0, 0]])
"""
I = np.argsort(a[:, i])
a = a[I, :]
# We recursively call sortrows to make sure it is sorted best by every
# column
if recurse and (len(a[0]) > i + 1):
for b in np.unique(a[:, i]):
ids = a[:, i] == b
colids = range(i) + range(i+1, len(a[0]))
a[np.ix_(ids, colids)], I2 = sortrows(a[np.ix_(ids, colids)],
0, True, True)
I[ids] = I[np.nonzero(ids)[0][I2]]
if index_out:
return a, I
else:
return a
def get_adjacent_index(I, shape, size):
"""
Find indices 2d-adjacent to those in I. Helper function for get_border*.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region
shape : tuple(int, int)
region shape
size : int
region size (technically computable from shape)
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally adjacent to I
"""
m, n = shape
In = I % n
bL = In != 0
bR = In != n-1
J = np.concatenate([
# orthonally adjacent
I - n,
I[bL] - 1,
I[bR] + 1,
I + n,
# diagonally adjacent
I[bL] - n-1,
I[bR] - n+1,
I[bL] + n-1,
I[bR] + n+1])
# remove indices outside the array
J = J[(J>=0) & (J<size)]
return J
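# Small worked example on a 3x4 grid: flat index 5 is the cell at row 1,
# column 1, so all eight of its neighbours come back as flat indices.
import numpy as np

shape = (3, 4)
I = np.array([5])
J = get_adjacent_index(I, shape, shape[0] * shape[1])
print(np.sort(J))  # [ 0  1  2  4  6  8  9 10]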
def get_border_index(I, shape, size):
"""
Get flattened indices for the border of the region I.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region.
size : int
region size (technically computable from shape argument)
shape : tuple(int, int)
region shape
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally bordering I
"""
J = get_adjacent_index(I, shape, size)
# instead of setdiff?
# border = np.zeros(size)
# border[J] = 1
# border[I] = 0
# J, = np.where(border)
return np.setdiff1d(J, I)
def get_border_mask(region):
"""
Get border of the region as a boolean array mask.
Parameters
----------
region : np.ndarray(shape=(m, n), dtype=bool)
mask of the region
Returns
-------
border : np.ndarray(shape=(m, n), dtype=bool)
mask of the region border (not including region)
"""
# common special case (for efficiency)
internal = region[1:-1, 1:-1]
if internal.all() and internal.any():
return ~region
I, = np.where(region.ravel())
J = get_adjacent_index(I, region.shape, region.size)
border = np.zeros(region.size, dtype='bool')
border[J] = 1
border[I] = 0
border = border.reshape(region.shape)
return border
def get_distance(region, src):
"""
Compute within-region distances from the src pixels.
Parameters
----------
region : np.ndarray(shape=(m, n), dtype=bool)
mask of the region
src : np.ndarray(shape=(m, n), dtype=bool)
mask of the source pixels to compute distances from.
Returns
-------
d : np.ndarray(shape=(m, n), dtype=float)
approximate within-region distance from the nearest src pixel;
(distances outside of the region are arbitrary).
"""
dmax = float(region.size)
d = np.full(region.shape, dmax)
d[src] = 0
for n in range(region.size):
d_orth = minimum_filter(d, footprint=_ORTH2) + 1
d_diag = minimum_filter(d, (3, 3)) + _SQRT2
d_adj = np.minimum(d_orth[region], d_diag[region])
d[region] = np.minimum(d_adj, d[region])
if (d[region] < dmax).all():
break
return d
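# Worked example, assuming the module-level _ORTH2 cross-shaped footprint and
# _SQRT2 constant used above: chamfer-style distances across a full 3x3 region
# measured from its top-left corner.
import numpy as np

region = np.ones((3, 3), dtype=bool)
src = np.zeros((3, 3), dtype=bool)
src[0, 0] = True
print(np.round(get_distance(region, src), 2))
# [[ 0.    1.    2.  ]
#  [ 1.    1.41  2.41]
#  [ 2.    2.41  2.83]]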
def grow_slice(slc, size):
"""
Grow a slice object by 1 in each direction without overreaching the list.
Parameters
----------
slc: slice
slice object to grow
size: int
list length
Returns
-------
slc: slice
extended slice
"""
return slice(max(0, slc.start-1), min(size, slc.stop+1))
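# Example: the grown slice is clipped so it never runs past either end.
print(grow_slice(slice(2, 5), 10))   # slice(1, 6, None)
print(grow_slice(slice(0, 10), 10))  # slice(0, 10, None) -- already maximal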
def is_edge(obj, shape):
"""
Check if a 2d object is on the edge of the array.
Parameters
----------
obj : tuple(slice, slice)
Pair of slices (e.g. from scipy.ndimage.measurements.find_objects)
shape : tuple(int, int)
Array shape.
Returns
-------
b : boolean
True if the object touches any edge of the array, else False.
"""
if obj[0].start == 0: return True
if obj[1].start == 0: return True
if obj[0].stop == shape[0]: return True
if obj[1].stop == shape[1]: return True
return False
def find_centroid(region):
"""
Finds an approximate centroid for a region that is within the region.
Parameters
----------
region : np.ndarray(shape=(m, n), dtype='bool')
mask of the region.
Returns
-------
i, j : tuple(int, int)
2d index within the region nearest the center of mass.
"""
x, y = center_of_mass(region)
w = np.argwhere(region)
i, j = w[np.argmin(np.linalg.norm(w - (x, y), axis=1))]
return i, j
def clear(self):
"""Resets the object at its initial (empty) state."""
self._deque.clear()
self._total_length = 0
self._has_view = False
def _tobytes(self):
"""Serializes the write buffer into a single string (bytes).
Returns:
a string (bytes) object.
"""
if not self._has_view:
# fast path optimization
if len(self._deque) == 0:
return b""
elif len(self._deque) == 1:
# no copy
return self._deque[0]
else:
return b"".join(self._deque)
else:
tmp = [x.tobytes() if isinstance(x, memoryview) else x
for x in self._deque]
return b"".join(tmp) | Serializes the write buffer into a single string (bytes).
Returns:
a string (bytes) object. | entailment |
def pop_chunk(self, chunk_max_size):
"""Pops a chunk of the given max size.
Optimized to avoid unnecessary string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
"""
if self._total_length < chunk_max_size:
# fast path (the whole queue fits in a single chunk)
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
# first iteration
if data_length == chunk_max_size:
# we are lucky !
return data
elif data_length > chunk_max_size:
# we have enough data at first iteration
# => fast path optimization
view = self._get_pointer_or_memoryview(data,
data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
# no single iteration fast path optimization :-(
# let's use a WriteBuffer to build the result chunk
chunk_write_buffer = WriteBuffer()
else:
# not first iteration
if chunk_write_buffer._total_length + data_length \
> chunk_max_size:
view = self._get_pointer_or_memoryview(data,
data_length)
limit = chunk_max_size - \
chunk_write_buffer._total_length - data_length
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if chunk_write_buffer._total_length >= chunk_max_size:
break
except IndexError:
# the buffer is empty (so no memoryview inside)
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes()
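# Hypothetical usage sketch of the buffer above: when everything fits, the
# fast path returns plain bytes; partial chunks may come back as zero-copy
# memoryview objects, so normalize before comparing.
buf = WriteBuffer()
buf.append(b"hello ")
buf.append(b"world")
print(buf.pop_chunk(64))        # 'hello world' -- whole buffer in one chunk

buf.append(b"abcdef")
chunk = buf.pop_chunk(4)        # may be bytes or a memoryview
print(bytes(bytearray(chunk)))  # 'abcd' either way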
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed (or added, if none)."""
# XXX if suffix is None, should the current suffix be removed?
drv, root, parts = self._flavour.parse_parts((suffix,))
if drv or root or len(parts) != 1:
raise ValueError("Invalid suffix %r" % (suffix))
suffix = parts[0]
if not suffix.startswith('.'):
raise ValueError("Invalid suffix %r" % (suffix))
name = self.name
if not name:
raise ValueError("%r has an empty name" % (self,))
old_suffix = self.suffix
if not old_suffix:
name = name + suffix
else:
name = name[:-len(old_suffix)] + suffix
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
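# Example (the backport mirrors the standard pathlib semantics here):
from pathlib import PurePosixPath

p = PurePosixPath('pack/archive.tar')
print(p.with_suffix('.gz'))     # pack/archive.gz
print(p.with_name('data.tar'))  # pack/data.tar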
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
return self._accessor.open(self, flags, mode)
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
for name in self._accessor.listdir(self):
if name in ('.', '..'):
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
"""
# XXX untested yet!
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj
def resolve(self):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
s = self._flavour.resolve(self)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path doesn't exist or is forbidden
self.stat()
s = str(self.absolute())
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
if sys.version_info >= (3, 3):
return io.open(str(self), mode, buffering, encoding, errors, newline,
opener=self._opener)
else:
return io.open(str(self), mode, buffering, encoding, errors, newline)
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if sys.version_info < (3, 3):
raise NotImplementedError("replace() is only available "
"with Python 3.3 and later")
self._accessor.replace(self, target)
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
self._accessor.symlink(target, self, target_is_directory)
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def grid_coords_from_corners(upper_left_corner, lower_right_corner, size):
''' Points are the outer edges of the UL and LR pixels. Size is rows, columns.
GC projection type is taken from Points. '''
assert upper_left_corner.wkt == lower_right_corner.wkt
geotransform = np.array([upper_left_corner.lon, -(upper_left_corner.lon - lower_right_corner.lon) / float(size[1]), 0,
upper_left_corner.lat, 0, -(upper_left_corner.lat - lower_right_corner.lat) / float(size[0])])
return GridCoordinates(geotransform=geotransform,
wkt=upper_left_corner.wkt,
y_size=size[0],
x_size=size[1])
def intersects(self, other_grid_coordinates):
""" returns True if the GC's overlap. """
ogc = other_grid_coordinates # alias
# for explanation: http://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
# Note the flipped y-coord in this coord system.
ax1, ay1, ax2, ay2 = self.ULC.lon, self.ULC.lat, self.LRC.lon, self.LRC.lat
bx1, by1, bx2, by2 = ogc.ULC.lon, ogc.ULC.lat, ogc.LRC.lon, ogc.LRC.lat
if ((ax1 <= bx2) and (ax2 >= bx1) and (ay1 >= by2) and (ay2 <= by1)):
return True
else:
return False
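# The test above is the classic axis-aligned rectangle overlap check, with
# latitude decreasing downward. A plain-numbers sketch of the same predicate:
def boxes_overlap(a, b):
    # each box is (west, north, east, south) in lon/lat
    ax1, ay1, ax2, ay2 = a
    bx1, by1, bx2, by2 = b
    return ax1 <= bx2 and ax2 >= bx1 and ay1 >= by2 and ay2 <= by1

print(boxes_overlap((-73, 46, -72, 45), (-72.5, 45.5, -71.5, 44.5)))  # True
print(boxes_overlap((-73, 46, -72, 45), (-70, 46, -69, 45)))          # False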
def unique_str(self):
""" A string that (ideally) uniquely represents this GC object. This
helps with naming files for caching. 'Unique' is defined as 'If
GC1 != GC2, then GC1.unique_str() != GC2.unique_str()'; conversely,
'If GC1 == GC2, then GC1.unique_str() == GC2.unique_str()'.
The string should be filename-safe (no \/:*?"<>|).
.. note:: Because of length/readability restrictions, this fxn ignores
wkt.
Example output:
"-180.000_0.250_0.000_90.000_0.000_-0.251_512_612_2013-05-21_12_32_52.945000"
"""
unique_str = "_".join(["%.3f" % f for f in self.geotransform] +
["%d" % d for d in self.x_size, self.y_size]
)
if self.date is not None:
unique_str += '_' + str(self.date)
if self.time is not None:
unique_str += '_' + str(self.time)
return unique_str.replace(':', '_')
def _get_x_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# (0, 0) is the outer corner of the top/left pixel; that pixel's center is at (.5, .5).
x_centers = np.linspace(.5, self.x_size - .5, self.x_size)
y_centers = x_centers * 0
return (self.geotransform[0]
+ self.geotransform[1] * x_centers
+ self.geotransform[2] * y_centers)
def _get_y_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# (0, 0) is the outer corner of the top/left pixel; that pixel's center is at (.5, .5).
y_centers = np.linspace(.5, self.y_size - .5, self.y_size)
x_centers = y_centers * 0
return (self.geotransform[3]
+ self.geotransform[4] * x_centers
+ self.geotransform[5] * y_centers)
def raster_to_projection_coords(self, pixel_x, pixel_y):
""" Use pixel centers when appropriate.
See documentation for the GDAL function GetGeoTransform for details. """
h_px_py = np.array([1, pixel_x, pixel_y])
gt = np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]])
arr = np.inner(gt, h_px_py)
return arr[2], arr[1]
def projection_to_raster_coords(self, lat, lon):
""" Returns pixel centers.
See documentation for the GDAL function GetGeoTransform for details. """
r_px_py = np.array([1, lon, lat])
tg = inv(np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]]))
return np.inner(tg, r_px_py)[1:]
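# Quick numeric check that the two transforms above invert each other, using
# a hypothetical 0.25-degree geotransform:
import numpy as np
from numpy.linalg import inv

gt = [-180.0, 0.25, 0.0, 90.0, 0.0, -0.25]
A = np.array([[1, 0, 0], gt[0:3], gt[3:6]], dtype=float)
h = A.dot([1.0, 10.5, 20.5])  # pixel (10.5, 20.5) -> [1, lon, lat]
print(h[1:])                  # [-177.375   84.875] -> (lon, lat)
print(inv(A).dot(h)[1:])      # [ 10.5  20.5] -> back to (px, py)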
def reproject_to_grid_coordinates(self, grid_coordinates, interp=gdalconst.GRA_NearestNeighbour):
""" Reprojects data in this layer to match that in the GridCoordinates
object. """
source_dataset = self.grid_coordinates._as_gdal_dataset()
dest_dataset = grid_coordinates._as_gdal_dataset()
rb = source_dataset.GetRasterBand(1)
rb.SetNoDataValue(NO_DATA_VALUE)
rb.WriteArray(np.ma.filled(self.raster_data, NO_DATA_VALUE))
gdal.ReprojectImage(source_dataset, dest_dataset,
source_dataset.GetProjection(),
dest_dataset.GetProjection(),
interp)
dest_layer = self.clone_traits()
dest_layer.grid_coordinates = grid_coordinates
rb = dest_dataset.GetRasterBand(1)
dest_layer.raster_data = np.ma.masked_values(rb.ReadAsArray(), NO_DATA_VALUE)
return dest_layer
def inpaint(self):
""" Replace masked-out elements in an array using an iterative image inpainting algorithm. """
import inpaint
filled = inpaint.replace_nans(np.ma.filled(self.raster_data, np.NAN).astype(np.float32), 3, 0.01, 2)
self.raster_data = np.ma.masked_invalid(filled)
def interp_value(self, lat, lon, indexed=False):
""" Lookup a pixel value in the raster data, performing linear interpolation
if necessary. Indexed ==> nearest neighbor (*fast*). """
(px, py) = self.grid_coordinates.projection_to_raster_coords(lat, lon)
if indexed:
return self.raster_data[round(py), round(px)]
else:
# from scipy.interpolate import interp2d
# f_interp = interp2d(self.grid_coordinates.x_axis, self.grid_coordinates.y_axis, self.raster_data, bounds_error=True)
# return f_interp(lon, lat)[0]
from scipy.ndimage import map_coordinates
ret = map_coordinates(self.raster_data, [[py], [px]], order=1) # linear interp
return ret[0]
def get_connected_client(self):
"""Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
"""
if self.__sem is not None:
yield self.__sem.acquire()
client = None
newly_created, client = self._get_client_from_pool_or_make_it()
if newly_created:
res = yield client.connect()
if not res:
LOG.warning("can't connect to %s", client.title)
raise tornado.gen.Return(
ClientError("can't connect to %s" % client.title))
raise tornado.gen.Return(client)
def get_client_nowait(self):
"""Gets a Client object (not necessary connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessary connected) as result (or None).
"""
if self.__sem is not None:
if self.__sem._value == 0:
return None
self.__sem.acquire()
_, client = self._get_client_from_pool_or_make_it()
return client
def connected_client(self):
"""Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automatically released to the pool thanks to
# the "with" keyword
reply = yield client.call("PING")
"""
future = self.get_connected_client()
cb = functools.partial(self._connected_client_release_cb, future)
return ContextManagerFuture(future, cb)
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(client):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Disconnecting')
client.disconnect()
if self.__sem is not None:
self.__sem.release()
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
break
def preconnect(self, size=-1):
"""(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
"""
if size == -1 and self.max_size == -1:
raise ClientError("size=-1 not allowed with pool max_size=-1")
limit = min(size, self.max_size) if size != -1 else self.max_size
clients = yield [self.get_connected_client() for _ in range(0, limit)]
for client in clients:
self.release_client(client)
def setup_path(invoke_minversion=None):
"""Setup python search and add ``TASKS_VENDOR_DIR`` (if available)."""
# print("INVOKE.tasks: setup_path")
if not os.path.isdir(TASKS_VENDOR_DIR):
print("SKIP: TASKS_VENDOR_DIR=%s is missing" % TASKS_VENDOR_DIR)
return
elif os.path.abspath(TASKS_VENDOR_DIR) in sys.path:
# -- SETUP ALREADY DONE:
# return
pass
use_vendor_bundles = os.environ.get("INVOKE_TASKS_USE_VENDOR_BUNDLES", "no")
if need_vendor_bundles(invoke_minversion):
use_vendor_bundles = "yes"
if use_vendor_bundles == "yes":
syspath_insert(0, os.path.abspath(TASKS_VENDOR_DIR))
if setup_path_for_bundle(INVOKE_BUNDLE, pos=1):
import invoke
bundle_path = os.path.relpath(INVOKE_BUNDLE, os.getcwd())
print("USING: %s (version: %s)" % (bundle_path, invoke.__version__))
else:
# -- BEST-EFFORT: May rescue something
syspath_append(os.path.abspath(TASKS_VENDOR_DIR))
setup_path_for_bundle(INVOKE_BUNDLE, pos=len(sys.path))
if DEBUG_SYSPATH:
for index, p in enumerate(sys.path):
print(" %d. %s" % (index, p)) | Setup python search and add ``TASKS_VENDOR_DIR`` (if available). | entailment |
def require_invoke_minversion(min_version, verbose=False):
"""Ensures that :mod:`invoke` has at the least the :param:`min_version`.
Otherwise,
:param min_version: Minimal acceptable invoke version (as string).
:param verbose: Indicates if invoke.version should be shown.
:raises: VersionRequirementError=SystemExit if requirement fails.
"""
# -- REQUIRES: sys.path is setup and contains invoke
try:
import invoke
invoke_version = invoke.__version__
except ImportError:
invoke_version = "__NOT_INSTALLED"
if invoke_version < min_version:
message = "REQUIRE: invoke.version >= %s (but was: %s)" % \
(min_version, invoke_version)
message += "\nUSE: pip install invoke>=%s" % min_version
raise VersionRequirementError(message)
INVOKE_VERSION = os.environ.get("INVOKE_VERSION", None)
if verbose and not INVOKE_VERSION:
os.environ["INVOKE_VERSION"] = invoke_version
print("USING: invoke.version=%s" % invoke_version) | Ensures that :mod:`invoke` has at the least the :param:`min_version`.
Otherwise,
:param min_version: Minimal acceptable invoke version (as string).
:param verbose: Indicates if invoke.version should be shown.
:raises: VersionRequirementError=SystemExit if requirement fails. | entailment |
def matches_section(section_name):
"""Decorator for SectionSchema classes to define the mapping between
a config section schema class and one or more config sections with
matching name(s).
.. sourcecode::
@matches_section("foo")
class FooSchema(SectionSchema):
pass
@matches_section(["bar", "baz.*"])
class BarAndBazSchema(SectionSchema):
pass
.. sourcecode:: ini
# -- FILE: *.ini
[foo] # USE: FooSchema
...
[bar] # USE: BarAndBazSchema
...
[baz.alice] # USE: BarAndBazSchema
...
"""
section_names = section_name
if isinstance(section_name, six.string_types):
section_names = [section_name]
elif not isinstance(section_name, (list, tuple)):
raise ValueError("%r (expected: string, strings)" % section_name)
def decorator(cls):
class_section_names = getattr(cls, "section_names", None)
if class_section_names is None:
cls.section_names = list(section_names)
else:
# -- BETTER SUPPORT: For multiple decorators
# @matches_section("foo")
# @matches_section("bar.*")
# class Example(SectionSchema):
# pass
# assert Example.section_names == ["foo", "bar.*"]
approved = [name for name in section_names
if name not in cls.section_names]
cls.section_names = approved + cls.section_names
return cls
return decorator
def assign_param_names(cls=None, param_class=None):
"""Class decorator to assign parameter name to instances of :class:`Param`.
.. sourcecode::
@assign_param_names
class ConfigSectionSchema(object):
alice = Param(type=str)
bob = Param(type=str)
assert ConfigSectionSchema.alice.name == "alice"
assert ConfigSectionSchema.bob.name == "bob"
.. sourcecode::
# -- NESTED ASSIGN: Covers also nested SectionSchema subclasses.
@assign_param_names
class ConfigSectionSchema(object):
class Foo(SectionSchema):
alice = Param(type=str)
bob = Param(type=str)
assert ConfigSectionSchema.Foo.alice.name == "alice"
assert ConfigSectionSchema.Foo.bob.name == "bob"
"""
if param_class is None:
param_class = Param
def decorate_class(cls):
for name, value in select_params_from_section_schema(cls, param_class,
deep=True):
# -- ANNOTATE PARAM: By assigning its name
if not value.name:
value.name = name
return cls
# -- DECORATOR LOGIC:
if cls is None:
# -- CASE: @assign_param_names
# -- CASE: @assign_param_names(...)
return decorate_class
else:
# -- CASE: @assign_param_names class X: ...
# -- CASE: assign_param_names(my_class)
# -- CASE: my_class = assign_param_names(my_class)
return decorate_class(cls)
def select_params_from_section_schema(section_schema, param_class=Param,
deep=False):
"""Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params
"""
# pylint: disable=invalid-name
for name, value in inspect.getmembers(section_schema):
if name.startswith("__") or value is None:
continue # pragma: no cover
elif inspect.isclass(value) and deep:
# -- CASE: class => SELF-CALL (recursively).
# pylint: disable= bad-continuation
cls = value
for name, value in select_params_from_section_schema(cls,
param_class=param_class, deep=True):
yield (name, value)
elif isinstance(value, param_class):
yield (name, value)
def parse_config_section(config_section, section_schema):
"""Parse a config file section (INI file) by using its schema/description.
.. sourcecode::
import configparser # -- NOTE: Use backport for Python2
import click
from click_configfile import SectionSchema, Param, parse_config_section
class ConfigSectionSchema(object):
class Foo(SectionSchema):
name = Param(type=str)
flag = Param(type=bool)
numbers = Param(type=int, multiple=True)
filenames = Param(type=click.Path(), multiple=True)
parser = configparser.ConfigParser()
parser.read(["foo.ini"])
config_section = parser["foo"]
data = parse_config_section(config_section, ConfigSectionSchema.Foo)
# -- FAILS WITH: click.BadParameter if conversion errors occur.
.. sourcecode:: ini
# -- FILE: foo.ini
[foo]
name = Alice
flag = yes # true, false, yes, no (case-insensitive)
numbers = 1 4 9 16 25
filenames = foo/xxx.txt
bar/baz/zzz.txt
:param config_section: Config section to parse
:param section_schema: Schema/description of config section (w/ Param).
:return: Retrieved data, values converted to described types.
:raises: click.BadParameter, if conversion error occurs.
"""
storage = {}
for name, param in select_params_from_section_schema(section_schema):
value = config_section.get(name, None)
if value is None:
if param.default is None:
continue
value = param.default
else:
value = param.parse(value)
# -- DIAGNOSTICS:
# print(" %s = %s" % (name, repr(value)))
storage[name] = value
return storage
def generate_configfile_names(config_files, config_searchpath=None):
"""Generates all configuration file name combinations to read.
.. sourcecode::
# -- ALGORITHM:
# First basenames/directories are preferred and override other files.
for config_path in reversed(config_searchpath):
for config_basename in reversed(config_files):
config_fname = os.path.join(config_path, config_basename)
if os.path.isfile(config_fname):
yield config_fname
:param config_files: List of config file basenames.
:param config_searchpath: List of directories to look for config files.
:return: List of available configuration file names (as generator)
"""
if config_searchpath is None:
config_searchpath = ["."]
for config_path in reversed(config_searchpath):
for config_basename in reversed(config_files):
config_fname = os.path.join(config_path, config_basename)
if os.path.isfile(config_fname):
# MAYBE: yield os.path.normpath(config_fname)
yield config_fname
def select_config_sections(configfile_sections, desired_section_patterns):
"""Select a subset of the sections in a configuration file by using
a list of section names or a list of section name patterns
(supporting :mod:`fnmatch` wildcards).
:param configfile_sections: List of config section names (as strings).
:param desired_section_patterns:
:return: List of selected section names or empty list (as generator).
"""
for section_name in configfile_sections:
for desired_section_pattern in desired_section_patterns:
if fnmatch(section_name, desired_section_pattern):
yield section_name
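# Example: wildcard selection keeps the order of the config file sections.
sections = ["hello", "person.alice", "person.bob", "misc"]
print(list(select_config_sections(sections, ["hello", "person.*"])))
# ['hello', 'person.alice', 'person.bob']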
def matches_section(cls, section_name, supported_section_names=None):
"""Indicates if this schema can be used for a config section
by using the section name.
:param section_name: Config section name to check.
:return: True, if this schema can be applied to the config section.
:return: False, if this schema does not match the config section.
"""
if supported_section_names is None:
supported_section_names = getattr(cls, "section_names", None)
# pylint: disable=invalid-name
for supported_section_name_or_pattern in supported_section_names:
if fnmatch(section_name, supported_section_name_or_pattern):
return True
# -- OTHERWISE:
return False | Indicates if this schema can be used for a config section
by using the section name.
:param section_name: Config section name to check.
:return: True, if this schema can be applied to the config section.
:return: False, if this schema does not match the config section. | entailment
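A sketch of how a schema class can reuse this check (HelloSchema and its section names are made up; matches_section is assumed to be a classmethod on SectionSchema):
class HelloSchema(SectionSchema):
    section_names = ["hello", "hello.*"]

assert HelloSchema.matches_section("hello.greeting") is True
assert HelloSchema.matches_section("logging") is False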
def collect_config_sections_from_schemas(cls, config_section_schemas=None):
# pylint: disable=invalid-name
"""Derive support config section names from config section schemas.
If no :param:`config_section_schemas` are provided, the schemas from
this class are used (normally defined in the DerivedClass).
:param config_section_schemas: List of config section schema classes.
:return: List of config section names or name patterns (as string).
"""
if config_section_schemas is None:
config_section_schemas = cls.config_section_schemas
collected = []
for schema in config_section_schemas:
collected.extend(schema.section_names)
# -- MAYBE BETTER:
# for name in schema.section_names:
# if name not in collected:
# collected.append(name)
return collected | Derive supported config section names from config section schemas.
If no :param:`config_section_schemas` are provided, the schemas from
this class are used (normally defined in the DerivedClass).
:param config_section_schemas: List of config section schema classes.
:return: List of config section names or name patterns (as string). | entailment |
def process_config_section(cls, config_section, storage):
"""Process the config section and store the extracted data in
the :param:`storage` (as outgoing param).
"""
# -- CONCEPT:
# if not storage:
# # -- INIT DATA: With default parts.
# storage.update(dict(_PERSONS={}))
schema = cls.select_config_schema_for(config_section.name)
if not schema:
message = "No schema found for: section=%s"
raise LookupError(message % config_section.name)
# -- PARSE AND STORE CONFIG SECTION:
section_storage = cls.select_storage_for(config_section.name, storage)
section_data = parse_config_section(config_section, schema)
section_storage.update(section_data) | Process the config section and store the extracted data in
the :param:`storage` (as outgoing param). | entailment
def select_config_schema_for(cls, section_name):
"""Select the config schema that matches the config section (by name).
:param section_name: Config section name (as key).
:return: Config section schema to use (subclass of: SectionSchema).
"""
# pylint: disable=cell-var-from-loop, redefined-outer-name
for section_schema in cls.config_section_schemas:
schema_matches = getattr(section_schema, "matches_section", None)
if schema_matches is None:
# -- OTHER SCHEMA CLASS: Reuse SectionSchema functionality.
schema_matches = lambda name: SectionSchema.matches_section(
name, section_schema.section_names)
if schema_matches(section_name):
return section_schema
return None | Select the config schema that matches the config section (by name).
:param section_name: Config section name (as key).
:return: Config section schema to use (subclass of: SectionSchema). | entailment
def select_storage_for(cls, section_name, storage):
"""Selects the data storage for a config section within the
:param:`storage`. The primary config section is normally merged into
the :param:`storage`.
:param section_name: Config section (name) to process.
:param storage: Data storage to use.
:return: :param:`storage` or a part of it (as section storage).
"""
section_storage = storage
storage_name = cls.get_storage_name_for(section_name)
if storage_name:
section_storage = storage.get(storage_name, None)
if section_storage is None:
section_storage = storage[storage_name] = dict()
return section_storage | Selects the data storage for a config section within the
:param:`storage`. The primary config section is normally merged into
the :param:`storage`.
:param section_name: Config section (name) to process.
:param storage: Data storage to use.
:return: :param:`storage` or a part of it (as section storage). | entailment |
def clean(ctx, dry_run=False):
"""Cleanup temporary dirs/files to regain a clean state."""
# -- VARIATION-POINT 1: Allow user to override in configuration-file
directories = ctx.clean.directories
files = ctx.clean.files
# -- VARIATION-POINT 2: Allow user to add more files/dirs to be removed.
extra_directories = ctx.clean.extra_directories or []
extra_files = ctx.clean.extra_files or []
if extra_directories:
directories.extend(extra_directories)
if extra_files:
files.extend(extra_files)
# -- PERFORM CLEANUP:
execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=dry_run)
cleanup_dirs(directories, dry_run=dry_run)
cleanup_files(files, dry_run=dry_run) | Cleanup temporary dirs/files to regain a clean state. | entailment |
def clean_all(ctx, dry_run=False):
"""Clean up everything, even the precious stuff.
NOTE: clean task is executed first.
"""
cleanup_dirs(ctx.clean_all.directories or [], dry_run=dry_run)
cleanup_dirs(ctx.clean_all.extra_directories or [], dry_run=dry_run)
cleanup_files(ctx.clean_all.files or [], dry_run=dry_run)
cleanup_files(ctx.clean_all.extra_files or [], dry_run=dry_run)
execute_cleanup_tasks(ctx, cleanup_all_tasks, dry_run=dry_run)
clean(ctx, dry_run=dry_run) | Clean up everything, even the precious stuff.
NOTE: clean task is executed first. | entailment |
def clean_python(ctx, dry_run=False):
"""Cleanup python related files/dirs: *.pyc, *.pyo, ..."""
# MAYBE NOT: "**/__pycache__"
cleanup_dirs(["build", "dist", "*.egg-info", "**/__pycache__"],
dry_run=dry_run)
if not dry_run:
ctx.run("py.cleanup")
cleanup_files(["**/*.pyc", "**/*.pyo", "**/*$py.class"], dry_run=dry_run) | Cleanup python related files/dirs: *.pyc, *.pyo, ... | entailment |
def cleanup_files(patterns, dry_run=False, workdir="."):
"""Remove files or files selected by file patterns.
Skips removal if file does not exist.
:param patterns: File patterns, like "**/*.pyc" (as list).
:param dry_run: Dry-run mode indicator (as bool).
:param workdir: Current work directory (default=".")
"""
current_dir = Path(workdir)
python_basedir = Path(Path(sys.executable).dirname()).joinpath("..").abspath()
error_message = None
error_count = 0
for file_pattern in patterns:
for file_ in path_glob(file_pattern, current_dir):
if file_.abspath().startswith(python_basedir):
# -- PROTECT CURRENTLY USED VIRTUAL ENVIRONMENT:
continue
if dry_run:
print("REMOVE: %s (dry-run)" % file_)
else:
print("REMOVE: %s" % file_)
try:
file_.remove_p()
except os.error as e:
message = "%s: %s" % (e.__class__.__name__, e)
print(message + " basedir: "+ python_basedir)
error_count += 1
if not error_message:
error_message = message
if False and error_message:
class CleanupError(RuntimeError): pass
raise CleanupError(error_message) | Remove files, or files selected by file patterns.
Skips removal if file does not exist.
:param patterns: File patterns, like "**/*.pyc" (as list).
:param dry_run: Dry-run mode indicator (as bool).
:param workdir: Current work directory (default=".") | entailment |
def path_glob(pattern, current_dir=None):
"""Use pathlib for ant-like patterns, like: "**/*.py"
:param pattern: File/directory pattern to use (as string).
:param current_dir: Current working directory (as Path, pathlib.Path, str)
:return: Resolved Path (as path.Path).
"""
if not current_dir:
current_dir = pathlib.Path.cwd()
elif not isinstance(current_dir, pathlib.Path):
# -- CASE: string, path.Path (string-like)
current_dir = pathlib.Path(str(current_dir))
for p in current_dir.glob(pattern):
yield Path(str(p)) | Use pathlib for ant-like patterns, like: "**/*.py"
:param pattern: File/directory pattern to use (as string).
:param current_dir: Current working directory (as Path, pathlib.Path, str)
:return: Resolved Path (as path.Path). | entailment
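A small usage sketch (the patterns and directories are illustrative):
# -- Ant-like recursive patterns, relative to the current directory:
for pyc_file in path_glob("**/*.pyc"):
    print("stale bytecode:", pyc_file)
# -- An explicit start directory also works (string or path.Path):
python_sources = list(path_glob("**/*.py", current_dir="src"))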
def stack_call(self, *args):
"""Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2")
"""
self.pipelined_args.append(args)
self.number_of_stacked_calls = self.number_of_stacked_calls + 1 | Stacks a redis command inside the object.
The syntax is the same as the call() method of a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2") | entailment |
def connect(self):
"""Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok.
"""
if self.is_connected() or self.is_connecting():
raise tornado.gen.Return(True)
if self.unix_domain_socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.tcp_nodelay:
self.__socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1)
else:
if not os.path.exists(self.unix_domain_socket):
LOG.warning("can't connect to %s, file does not exist",
self.unix_domain_socket)
raise tornado.gen.Return(False)
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.setblocking(0)
self.__periodic_callback.start()
try:
LOG.debug("connecting to %s...", self._redis_server())
self._state.set_connecting()
if self.unix_domain_socket is None:
self.__socket.connect((self.host, self.port))
else:
self.__socket.connect(self.unix_domain_socket)
except socket.error as e:
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
self.disconnect()
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
self.__socket_fileno = self.__socket.fileno()
self._register_or_update_event_handler()
yield self._state.get_changed_state_future()
if not self.is_connected():
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
else:
LOG.debug("connected to %s", self._redis_server())
self.__socket_fileno = self.__socket.fileno()
self._state.set_connected()
self._register_or_update_event_handler()
raise tornado.gen.Return(True) | Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok. | entailment |
def disconnect(self):
"""Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors).
"""
if not self.is_connected() and not self.is_connecting():
return
LOG.debug("disconnecting from %s...", self._redis_server())
self.__periodic_callback.stop()
try:
self._ioloop.remove_handler(self.__socket_fileno)
self._listened_events = 0
except Exception:
pass
self.__socket_fileno = -1
try:
self.__socket.close()
except Exception:
pass
self._state.set_disconnected()
self._close_callback()
LOG.debug("disconnected from %s", self._redis_server()) | Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors). | entailment |
def write(self, data):
"""Buffers some data to be sent to the host:port in a non blocking way.
So the data is always buffered and not sent on the socket in a
synchronous way.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
Args:
data (str or WriteBuffer): string (or WriteBuffer) to write to
the host:port.
"""
if isinstance(data, WriteBuffer):
self._write_buffer.append(data)
else:
if len(data) > 0:
self._write_buffer.append(data)
if self.aggressive_write:
self._handle_write()
if self._write_buffer._total_length > 0:
self._register_or_update_event_handler(write=True) | Buffers some data to be sent to the host:port in a non-blocking way.
So the data is always buffered and not sent on the socket in a
synchronous way.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
Args:
data (str or WriteBuffer): string (or WriteBuffer) to write to
the host:port. | entailment |
def surrogate_escape(error):
"""
Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.
"""
chars = error.object[error.start:error.end]
assert len(chars) == 1
val = ord(chars)
val += 0xdc00
return __builtin__.unichr(val), error.end | Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only. | entailment |
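A sketch of wiring the handler up on Python 2 (the codec error name mirrors the Python 3 built-in):
import codecs

# Register once; afterwards undecodable bytes map to lone surrogates
# (U+DC80..U+DCFF) instead of raising UnicodeDecodeError.
codecs.register_error('surrogateescape', surrogate_escape)

raw = b'caf\xe9'                               # not valid UTF-8
text = raw.decode('utf-8', 'surrogateescape')  # last char -> u'\udce9'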
def _always_unicode(cls, path):
"""
Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
is a proper Unicode string.
"""
if PY3 or isinstance(path, text_type):
return path
return path.decode(sys.getfilesystemencoding(), 'surrogateescape') | Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
is a proper Unicode string. | entailment |
def namebase(self):
""" The same as :meth:`name`, but with one file extension stripped off.
For example,
``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
but
``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``.
"""
base, ext = self.module.splitext(self.name)
return base | The same as :meth:`name`, but with one file extension stripped off.
For example,
``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
but
``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``. | entailment |
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use :meth:`files` or :meth:`dirs` instead if you want a listing
of just files or just subdirectories.
The elements of the list are Path objects.
With the optional `pattern` argument, this only lists
items whose names match the given pattern.
.. seealso:: :meth:`files`, :meth:`dirs`
"""
if pattern is None:
pattern = '*'
return [
self / child
for child in map(self._always_unicode, os.listdir(self))
if self._next_class(child).fnmatch(pattern)
] | D.listdir() -> List of items in this directory.
Use :meth:`files` or :meth:`dirs` instead if you want a listing
of just files or just subdirectories.
The elements of the list are Path objects.
With the optional `pattern` argument, this only lists
items whose names match the given pattern.
.. seealso:: :meth:`files`, :meth:`dirs` | entailment |
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are Path objects.
This does not walk recursively into subdirectories
(but see :meth:`walkdirs`).
With the optional `pattern` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()] | D.dirs() -> List of this directory's subdirectories.
The elements of the list are Path objects.
This does not walk recursively into subdirectories
(but see :meth:`walkdirs`).
With the optional `pattern` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``. | entailment |
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
With the optional `pattern` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()] | D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
With the optional `pattern` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``. | entailment |
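A combined sketch of the three listing methods above (directory and patterns are hypothetical):
d = Path("/var/log")
entries = d.listdir("*.log")   # matching children: files and dirs
subdirs = d.dirs("app-*")      # immediate subdirectories only
logfiles = d.files("*.log")    # immediate regular files only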
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional `pattern` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``'test'``.
The `errors=` keyword argument controls behavior when an
error occurs. The default is ``'strict'``, which causes an
exception. The other allowed values are ``'warn'`` (which
reports the error via :func:`warnings.warn()`), and ``'ignore'``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir | D.walkdirs() -> iterator over subdirs, recursively.
With the optional `pattern` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``'test'``.
The `errors=` keyword argument controls behavior when an
error occurs. The default is ``'strict'``, which causes an
exception. The other allowed values are ``'warn'`` (which
reports the error via :func:`warnings.warn()`), and ``'ignore'``. | entailment |
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument `pattern` limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except Exception:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f | D.walkfiles() -> iterator over files in D, recursively.
The optional argument `pattern` limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension. | entailment |
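A short sketch combining the two recursive walkers above (patterns are illustrative):
# -- Remove stray temp files; warn instead of raising on unreadable dirs.
for tmp_file in Path(".").walkfiles("*.tmp", errors="warn"):
    tmp_file.remove_p()
for test_dir in Path(".").walkdirs("*test", errors="ignore"):
    print("test directory:", test_dir)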
def open(self, *args, **kwargs):
""" Open this file and return a corresponding :class:`file` object.
Keyword arguments work as in :func:`io.open`. If the file cannot be
opened, an :class:`~exceptions.OSError` is raised.
"""
with io_error_compat():
return io.open(self, *args, **kwargs) | Open this file and return a corresponding :class:`file` object.
Keyword arguments work as in :func:`io.open`. If the file cannot be
opened, an :class:`~exceptions.OSError` is raised. | entailment |
def write_text(self, text, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion.
"""
if isinstance(text, text_type):
if linesep is not None:
text = U_NEWLINE.sub(linesep, text)
text = text.encode(encoding or sys.getdefaultencoding(), errors)
else:
assert encoding is None
text = NEWLINE.sub(linesep, text)
self.write_bytes(text, append=append) | r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion. | entailment |
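A sketch of the newline and encoding options described above (the file name is arbitrary):
p = Path("notes.txt")
p.write_text(u"line one\nline two\n")            # '\n' -> os.linesep
p.write_text(u"raw\r\nchunk", linesep=None,      # keep newlines verbatim
             append=True)
p.write_text(u"caf\xe9\n", encoding="utf-8",     # Unicode -> UTF-8 bytes
             append=True)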
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See `linesep` below.
`lines` - A list of strings.
`encoding` - A Unicode encoding to use. This applies only if
`lines` contains any Unicode strings.
`errors` - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
be stripped off and this will be used instead. The
default is os.linesep, which is platform-dependent
(``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
Specify ``None`` to write the lines as-is, like
:meth:`file.writelines`.
Use the keyword argument ``append=True`` to append lines to the
file. The default is to overwrite the file.
.. warning ::
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the `encoding=` parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
with self.open('ab' if append else 'wb') as f:
for l in lines:
isUnicode = isinstance(l, text_type)
if linesep is not None:
pattern = U_NL_END if isUnicode else NL_END
l = pattern.sub('', l) + linesep
if isUnicode:
l = l.encode(encoding or sys.getdefaultencoding(), errors)
f.write(l) | r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See `linesep` below.
`lines` - A list of strings.
`encoding` - A Unicode encoding to use. This applies only if
`lines` contains any Unicode strings.
`errors` - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
be stripped off and this will be used instead. The
default is os.linesep, which is platform-dependent
(``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
Specify ``None`` to write the lines as-is, like
:meth:`file.writelines`.
Use the keyword argument ``append=True`` to append lines to the
file. The default is to overwrite the file.
.. warning ::
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the `encoding=` parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later. | entailment |
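A minimal sketch: mixed line endings are normalized to a single linesep.
Path("hosts.txt").write_lines([u"alpha", u"beta\r\n", u"gamma\n"])
# every line now ends with os.linesep, whatever it ended with before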
def mkdir_p(self, mode=0o777):
""" Like :meth:`mkdir`, but does not raise an exception if the
directory already exists. """
try:
self.mkdir(mode)
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.EEXIST:
raise
return self | Like :meth:`mkdir`, but does not raise an exception if the
directory already exists. | entailment |
def makedirs_p(self, mode=0o777):
""" Like :meth:`makedirs`, but does not raise an exception if the
directory already exists. """
try:
self.makedirs(mode)
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.EEXIST:
raise
return self | Like :meth:`makedirs`, but does not raise an exception if the
directory already exists. | entailment |
def rmdir_p(self):
""" Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist. """
try:
self.rmdir()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
raise
return self | Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist. | entailment |
def removedirs_p(self):
""" Like :meth:`removedirs`, but does not raise an exception if the
directory is not empty or does not exist. """
try:
self.removedirs()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
raise
return self | Like :meth:`removedirs`, but does not raise an exception if the
directory is not empty or does not exist. | entailment |
def remove_p(self):
""" Like :meth:`remove`, but does not raise an exception if the
file does not exist. """
try:
self.unlink()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOENT:
raise
return self | Like :meth:`remove`, but does not raise an exception if the
file does not exist. | entailment |
def rmtree_p(self):
""" Like :meth:`rmtree`, but does not raise an exception if the
directory does not exist. """
try:
self.rmtree()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOENT:
raise
return self | Like :meth:`rmtree`, but does not raise an exception if the
directory does not exist. | entailment |
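The ``*_p`` variants above make setup and teardown idempotent; a small sketch:
build_dir = Path("build/stage").makedirs_p()  # ok if it already exists
build_dir.rmtree_p()                          # ok if it is already gone
Path("build").rmdir_p()                       # ok if non-empty or missing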
def merge_tree(self, dst, symlinks=False, *args, **kwargs):
"""
Copy entire contents of self to dst, overwriting existing
contents in dst with those in self.
If the additional keyword `update` is True, each
`src` will only be copied if `dst` does not exist,
or `src` is newer than `dst`.
Note that the technique employed stages the files in a temporary
directory first, so this function is not suitable for merging
trees with large files, especially if the temporary directory
is not capable of storing a copy of the entire source tree.
"""
update = kwargs.pop('update', False)
with tempdir() as _temp_dir:
# first copy the tree to a stage directory to support
# the parameters and behavior of copytree.
stage = _temp_dir / str(hash(self))
self.copytree(stage, symlinks, *args, **kwargs)
# now copy everything from the stage directory using
# the semantics of dir_util.copy_tree
dir_util.copy_tree(stage, dst, preserve_symlinks=symlinks,
update=update) | Copy entire contents of self to dst, overwriting existing
contents in dst with those in self.
If the additional keyword `update` is True, each
`src` will only be copied if `dst` does not exist,
or `src` is newer than `dst`.
Note that the technique employed stages the files in a temporary
directory first, so this function is not suitable for merging
trees with large files, especially if the temporary directory
is not capable of storing a copy of the entire source tree. | entailment |
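A usage sketch for the overlay copy above (directory names are made up):
# -- Overlay packaged defaults onto a deploy tree; with update=True,
# -- files already newer in "deploy" are left untouched.
Path("defaults").merge_tree(Path("deploy"), update=True)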
def __gdal_dataset_default(self):
"""DiskReader implementation."""
if not os.path.exists(self.file_name):
return None
if os.path.splitext(self.file_name)[1].lower() not in self.file_types:
raise RuntimeError('Filename %s does not have extension type %s.' % (self.file_name, self.file_types))
dataset = gdal.OpenShared(self.file_name, gdalconst.GA_ReadOnly)
if dataset is None:
raise ValueError('Dataset %s did not load properly.' % self.file_name)
# Sanity checks.
assert dataset.RasterCount > 0
# Seems okay...
return dataset | DiskReader implementation. | entailment |
def connect(self):
"""Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
"""
if self.is_connected():
raise tornado.gen.Return(True)
cb1 = self._read_callback
cb2 = self._close_callback
self.__callback_queue = collections.deque()
self._reply_list = []
self.__reader = hiredis.Reader(replyError=ClientError)
kwargs = self.connection_kwargs
self.__connection = Connection(cb1, cb2, **kwargs)
connection_status = yield self.__connection.connect()
if connection_status is not True:
# nothing left to do here, return
raise tornado.gen.Return(False)
if self.password is not None:
authentication_status = yield self._call('AUTH', self.password)
if authentication_status != b'OK':
# incorrect password, return back the result
LOG.warning("impossible to connect: bad password")
self.__connection.disconnect()
raise tornado.gen.Return(False)
if self.db != 0:
db_status = yield self._call('SELECT', self.db)
if db_status != b'OK':
LOG.warning("can't select db %s", self.db)
raise tornado.gen.Return(False)
raise tornado.gen.Return(True) | Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok. | entailment |
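A coroutine sketch of explicit connection handling (the host/port keyword arguments are assumptions about the accepted connection kwargs):
import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def ping_once():
    client = Client(host="localhost", port=6379, autoconnect=False)
    connected = yield client.connect()
    if not connected:
        raise tornado.gen.Return(None)
    reply = yield client.call("PING")
    raise tornado.gen.Return(reply)

print(tornado.ioloop.IOLoop.instance().run_sync(ping_once))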
def _close_callback(self):
"""Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked clients.
"""
while True:
try:
callback = self.__callback_queue.popleft()
callback(ConnectionError("closed connection"))
except IndexError:
break
if self.subscribed:
# pubsub clients
self._reply_list.append(ConnectionError("closed connection"))
self._condition.notify_all() | Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked clients. | entailment
def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except IndexError:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
# something nasty occurred (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect() | Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply on the reply queue.
Args:
data (str): string (buffer) read on the socket. | entailment |
def call(self, *args, **kwargs):
"""Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val")
"""
if not self.is_connected():
if self.autoconnect:
# We use this method only when we are not connected
# to avoid the performance penalty of the gen.coroutine decorator
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs) | Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val") | entailment |
def async_call(self, *args, **kwargs):
"""Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb)
"""
def after_autoconnect_callback(future):
if self.is_connected():
self._call(*args, **kwargs)
else:
# FIXME
pass
if 'callback' not in kwargs:
kwargs['callback'] = discard_reply_cb
if not self.is_connected():
if self.autoconnect:
connect_future = self.connect()
cb = after_autoconnect_callback
self.__connection._ioloop.add_future(connect_future, cb)
else:
error = ConnectionError("you are not connected and "
"autoconnect=False")
kwargs['callback'](error)
else:
self._call(*args, **kwargs) | Calls a redis command, waits for the reply and calls a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
case of errors, the callback is called with a
TornadisException object as argument.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: options as keyword parameters.
Examples:
>>> def cb(result):
pass
>>> client.async_call("HSET", "key", "field", "val", callback=cb) | entailment |
def format_args_in_redis_protocol(*args):
"""Formats arguments into redis protocol...
This function makes and returns a string/buffer corresponding to
given arguments formatted with the redis protocol.
integer, text, string or binary types are automatically converted
(using utf8 if necessary).
More information about the protocol: http://redis.io/topics/protocol
Args:
*args: full redis command as variable length argument list
Returns:
binary string (arguments in redis protocol)
Examples:
>>> format_args_in_redis_protocol("HSET", "key", "field", "value")
'*4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n'
"""
buf = WriteBuffer()
l = "*%d\r\n" % len(args) # noqa: E741
if six.PY2:
buf.append(l)
else: # pragma: no cover
buf.append(l.encode('utf-8'))
for arg in args:
if isinstance(arg, six.text_type):
# it's a unicode string in Python2 or a standard (unicode)
# string in Python3, let's encode it in utf-8 to get raw bytes
arg = arg.encode('utf-8')
elif isinstance(arg, six.string_types):
# it's a basestring in Python2 => nothing to do
pass
elif isinstance(arg, six.binary_type): # pragma: no cover
# it's a raw bytes string in Python3 => nothing to do
pass
elif isinstance(arg, six.integer_types):
tmp = "%d" % arg
if six.PY2:
arg = tmp
else: # pragma: no cover
arg = tmp.encode('utf-8')
elif isinstance(arg, WriteBuffer):
# it's a WriteBuffer object => nothing to do
pass
else:
raise Exception("don't know what to do with %s" % type(arg))
l = "$%d\r\n" % len(arg) # noqa: E741
if six.PY2:
buf.append(l)
else: # pragma: no cover
buf.append(l.encode('utf-8'))
buf.append(arg)
buf.append(b"\r\n")
return buf | Formats arguments into redis protocol...
This function makes and returns a string/buffer corresponding to
given arguments formatted with the redis protocol.
integer, text, string or binary types are automatically converted
(using utf8 if necessary).
More information about the protocol: http://redis.io/topics/protocol
Args:
*args: full redis command as variable length argument list
Returns:
binary string (arguments in redis protocol)
Examples:
>>> format_args_in_redis_protocol("HSET", "key", "field", "value")
'*4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n' | entailment |
def _done_callback(self, wrapped):
"""Internal "done callback" to set the result of the object.
The result of the object is forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object
"""
if wrapped.exception():
self.set_exception(wrapped.exception())
else:
self.set_result(wrapped.result()) | Internal "done callback" to set the result of the object.
The result of the object is forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object | entailment |
def result(self):
"""The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager
"""
if self.exception():
raise self.exception()
# Otherwise return a context manager that cleans up after the block.
@contextlib.contextmanager
def f():
try:
yield self._wrapped.result()
finally:
self._exit_callback()
return f() | The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager | entailment |
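A consumption sketch, assuming a tornadis-style Pool whose connected_client() returns this wrapped future; leaving the with-block runs the exit callback (e.g. the client goes back to the pool):
import tornado.gen

@tornado.gen.coroutine
def ping_from_pool(pool):
    with (yield pool.connected_client()) as client:
        reply = yield client.call("PING")
    raise tornado.gen.Return(reply)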
def create_url(artist, song):
"""Create the URL in the LyricWikia format"""
return (__BASE_URL__ +
'/wiki/{artist}:{song}'.format(artist=urlize(artist),
song=urlize(song))) | Create the URL in the LyricWikia format | entailment |
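A hedged usage sketch (assuming urlize turns spaces into underscores and __BASE_URL__ points at the LyricWikia host):
url = create_url("Led Zeppelin", "Stairway to Heaven")
# e.g. 'http://lyrics.wikia.com/wiki/Led_Zeppelin:Stairway_to_Heaven'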