Columns: text_prompt (string, lengths 157 to 13.1k) · code_prompt (string, lengths 7 to 19.8k)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_sides(self, key, data, field, local=False): """ Assign data on the 'key' tile to all the edges """
    for side in ['left', 'right', 'top', 'bottom']:
        self.set(key, data, field, side, local)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_neighbor_data(self, neighbor_side, data, key, field): """ Assign data from the 'key' tile to the edge on the neighboring tile which is on the 'neighbor_side' of the 'key' tile. The data is assigned to the 'field' attribute of the neighboring tile's edge. """
    i = self.keys[key]
    found = False
    sides = []
    if 'left' in neighbor_side:
        if i % self.n_cols == 0:
            return None
        i -= 1
        sides.append('right')
        found = True
    if 'right' in neighbor_side:
        if i % self.n_cols == self.n_cols - 1:
            return None
        i += 1
        sides.append('left')
        found = True
    if 'top' in neighbor_side:
        sides.append('bottom')
        i -= self.n_cols
        found = True
    if 'bottom' in neighbor_side:
        sides.append('top')
        i += self.n_cols
        found = True
    if not found:
        print "Side '%s' not found" % neighbor_side
    # Check if i is in range
    if i < 0 or i >= self.n_chunks:
        return None
    # Otherwise, set the data
    for side in sides:
        self.set_i(i, data, field, side)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_all_neighbors_data(self, data, done, key): """ Given the 'key' tile's data, assigns this information to all neighboring tiles """
    # The order of this for loop is important because the topleft gets
    # its data from the left neighbor, which should have already been
    # updated...
    for side in ['left', 'right', 'top', 'bottom', 'topleft',
                 'topright', 'bottomleft', 'bottomright']:
        self.set_neighbor_data(side, data, key, 'data')
        # self.set_neighbor_data(side, todo, key, 'todo')
        self.set_neighbor_data(side, done, key, 'done')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill_n_todo(self): """ Calculate and record the number of edge pixels left to do on each tile """
    left = self.left
    right = self.right
    top = self.top
    bottom = self.bottom
    for i in xrange(self.n_chunks):
        self.n_todo.ravel()[i] = np.sum([left.ravel()[i].n_todo,
                                         right.ravel()[i].n_todo,
                                         top.ravel()[i].n_todo,
                                         bottom.ravel()[i].n_todo])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill_n_done(self): """ Calculate and record the number of edge pixels that are done on each tile. """
    left = self.left
    right = self.right
    top = self.top
    bottom = self.bottom
    for i in xrange(self.n_chunks):
        self.n_done.ravel()[i] = np.sum([left.ravel()[i].n_done,
                                         right.ravel()[i].n_done,
                                         top.ravel()[i].n_done,
                                         bottom.ravel()[i].n_done])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill_percent_done(self): """ Calculate the percentage of edge pixels that would be done if the tile was reprocessed. This is done for each tile. """
    left = self.left
    right = self.right
    top = self.top
    bottom = self.bottom
    for i in xrange(self.n_chunks):
        self.percent_done.ravel()[i] = \
            np.sum([left.ravel()[i].percent_done,
                    right.ravel()[i].percent_done,
                    top.ravel()[i].percent_done,
                    bottom.ravel()[i].percent_done])
        self.percent_done.ravel()[i] /= \
            np.sum([left.ravel()[i].percent_done > 0,
                    right.ravel()[i].percent_done > 0,
                    top.ravel()[i].percent_done > 0,
                    bottom.ravel()[i].percent_done > 0,
                    1e-16])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_shapes(self): """ Fixes the shape of the data fields on edges. Left edges should be column vectors, and top edges should be row vectors, for example. """
    for i in xrange(self.n_chunks):
        for side in ['left', 'right', 'top', 'bottom']:
            edge = getattr(self, side).ravel()[i]
            if side in ['left', 'right']:
                shp = [edge.todo.size, 1]
            else:
                shp = [1, edge.todo.size]
            edge.done = edge.done.reshape(shp)
            edge.data = edge.data.reshape(shp)
            edge.todo = edge.todo.reshape(shp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_best_candidate(self): """ Determine which tile, when processed, would complete the largest percentage of unresolved edge pixels. This is a heuristic function and does not give the optimal tile. """
    self.fill_percent_done()
    i_b = np.argmax(self.percent_done.ravel())
    if self.percent_done.ravel()[i_b] <= 0:
        return None
    # check for ties
    I = self.percent_done.ravel() == self.percent_done.ravel()[i_b]
    if I.sum() == 1:
        return i_b
    else:
        I2 = np.argmax(self.max_elev.ravel()[I])
        return I.nonzero()[0][I2]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_array(self, array, name=None, partname=None, rootpath='.', raw=False, as_int=True): """ Standard array saving routine Parameters array : array Array to save to file name : str, optional Default 'array.tif'. Filename of array to save. Over-writes partname. partname : str, optional Part of the filename to save (with the coordinates appended) rootpath : str, optional Default '.'. Which directory to save file raw : bool, optional Default False. If true will save a .npz of the array. If false, will save a geotiff as_int : bool, optional Default True. If true will save array as an integer array ( excellent compression). If false will save as float array. """
    if name is None and partname is not None:
        fnl_file = self.get_full_fn(partname, rootpath)
        tmp_file = os.path.join(rootpath, partname,
                                self.get_fn(partname + '_tmp'))
    elif name is not None:
        fnl_file = name
        tmp_file = fnl_file + '_tmp.tiff'
    else:
        fnl_file = 'array.tif'
        tmp_file = fnl_file + '_tmp.tiff'  # otherwise tmp_file is unbound below
    if not raw:
        s_file = self.elev.clone_traits()
        s_file.raster_data = np.ma.masked_array(array)
        count = 10
        while count > 0 and (s_file.raster_data.mask.sum() > 0
                             or np.isnan(s_file.raster_data).sum() > 0):
            s_file.inpaint()
            count -= 1
        s_file.export_to_geotiff(tmp_file)
        if as_int:
            cmd = "gdalwarp -multi -wm 2000 -co BIGTIFF=YES -of GTiff " \
                  "-co compress=lzw -ot Int16 -co TILED=YES " \
                  "-wo OPTIMIZE_SIZE=YES -r near -t_srs %s %s %s" \
                  % (self.save_projection, tmp_file, fnl_file)
        else:
            cmd = "gdalwarp -multi -wm 2000 -co BIGTIFF=YES -of GTiff " \
                  "-co compress=lzw -co TILED=YES -wo OPTIMIZE_SIZE=YES " \
                  "-r near -t_srs %s %s %s" \
                  % (self.save_projection, tmp_file, fnl_file)
        print "<<" * 4, cmd, ">>" * 4
        subprocess.call(cmd, shell=True)  # cmd is a single string
        os.remove(tmp_file)
    else:
        np.savez_compressed(fnl_file, array)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_uca(self, rootpath, raw=False, as_int=False): """ Saves the upstream contributing area to a file """
self.save_array(self.uca, None, 'uca', rootpath, raw, as_int=as_int)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_twi(self, rootpath, raw=False, as_int=True): """ Saves the topographic wetness index to a file """
    self.twi = np.ma.masked_array(self.twi, mask=self.twi <= 0,
                                  fill_value=-9999)
    # self.twi = self.twi.filled()
    self.twi[self.flats] = 0
    self.twi.mask[self.flats] = True
    # self.twi = self.flats
    self.save_array(self.twi, None, 'twi', rootpath, raw, as_int=as_int)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_slope(self, rootpath, raw=False, as_int=False): """ Saves the magnitude of the slope to a file """
self.save_array(self.mag, None, 'mag', rootpath, raw, as_int=as_int)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_direction(self, rootpath, raw=False, as_int=False): """ Saves the direction of the slope to a file """
self.save_array(self.direction, None, 'ang', rootpath, raw, as_int=as_int)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_outputs(self, rootpath='.', raw=False): """Saves TWI, UCA, magnitude and direction of slope to files. """
    self.save_twi(rootpath, raw)
    self.save_uca(rootpath, raw)
    self.save_slope(rootpath, raw)
    self.save_direction(rootpath, raw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_array(self, fn, name): """ Can only load files that were saved in the 'raw' format. Loads previously computed field 'name' from file Valid names are 'mag', 'direction', 'uca', 'twi' """
    if os.path.exists(fn + '.npz'):
        array = np.load(fn + '.npz')
        try:
            setattr(self, name, array['arr_0'])
        except Exception, e:
            print e
        finally:
            array.close()
    else:
        raise RuntimeError("File %s does not exist." % (fn + '.npz'))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _assign_chunk(self, data, arr1, arr2, te, be, le, re, ovr, add=False): """ Assign data from a chunk to the full array. The data in overlap regions will not be assigned to the full array Parameters data : array Unused array (except for shape) that has size of full tile arr1 : array Full size array to which data will be assigned arr2 : array Chunk-sized array from which data will be assigned te : int Top edge id be : int Bottom edge id le : int Left edge id re : int Right edge id ovr : int The number of pixels in the overlap add : bool, optional Default False. If true, the data in arr2 will be added to arr1, otherwise data in arr2 will overwrite data in arr1 """
    if te == 0:
        i1 = 0
    else:
        i1 = ovr
    if be == data.shape[0]:
        i2 = 0
        i2b = None
    else:
        i2 = -ovr
        i2b = -ovr
    if le == 0:
        j1 = 0
    else:
        j1 = ovr
    if re == data.shape[1]:
        j2 = 0
        j2b = None
    else:
        j2 = -ovr
        j2b = -ovr
    if add:
        arr1[te+i1:be+i2, le+j1:re+j2] += arr2[i1:i2b, j1:j2b]
    else:
        arr1[te+i1:be+i2, le+j1:re+j2] = arr2[i1:i2b, j1:j2b]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _slopes_directions(self, data, dX, dY, method='tarboton'): """ Wrapper to pick between various algorithms """
    if method == 'tarboton':
        return self._tarboton_slopes_directions(data, dX, dY)
    elif method == 'central':
        return self._central_slopes_directions(data, dX, dY)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _find_flats_edges(self, data, mag, direction): """ Extend flats 1 square downstream Flats on the downstream side of the flat might find a valid angle, but that doesn't mean that it's a correct angle. We have to find these and then set them equal to a flat """
    i12 = np.arange(data.size).reshape(data.shape)
    flat = mag == FLAT_ID_INT
    flats, n = spndi.label(flat, structure=FLATS_KERNEL3)
    objs = spndi.find_objects(flats)
    f = flat.ravel()
    d = data.ravel()
    for i, _obj in enumerate(objs):
        region = flats[_obj] == i + 1
        I = i12[_obj][region]
        J = get_adjacent_index(I, data.shape, data.size)
        f[J] = d[J] == d[I[0]]
    flat = f.reshape(data.shape)
    return flat
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _drain_step(self, A, ids, area, done, edge_todo): """ Does a single step of the upstream contributing area calculation. Here the pixels in ids are drained downstream, the areas are updated and the next set of pixels to drain are determined for the next round. """
    # Only drain to cells that have a contribution
    A_todo = A[:, ids.ravel()]
    colsum = np.array(A_todo.sum(1)).ravel()
    # Only touch cells that actually receive a contribution
    # during this stage
    ids_new = colsum != 0
    # Is it possible that I may drain twice from my own cell?
    # -- No, I don't think so...
    # Is it possible that other cells may drain into me in
    # multiple iterations -- yes
    # The check for when I'm done ensures that I don't drain until
    # everyone has drained into me
    area.ravel()[ids_new] += (A_todo[ids_new, :]
                              * (area.ravel()[ids].ravel()))
    edge_todo.ravel()[ids_new] += (A_todo[ids_new, :]
                                   * (edge_todo.ravel()[ids].ravel()))
    # Figure out what's left to do.
    done.ravel()[ids] = True
    colsum = A * (~done.ravel())
    ids = colsum == 0
    # Figure out the new-undrained ids
    ids = ids & (~done.ravel())
    return ids, area, done, edge_todo
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _mk_adjacency_matrix(self, section, proportion, flats, elev, mag, dX, dY): """ Calculates the adjacency (connectivity) matrix. This matrix tells which pixels drain to which. For example, pixel i will receive area from np.nonzero(A[i, :]) at the proportions given in A[i, :]. So, the row gives the pixel drained to, and the columns the pixels drained from. """
    shp = section.shape
    mat_data = np.row_stack((proportion, 1 - proportion))
    NN = np.prod(shp)
    i12 = np.arange(NN).reshape(shp)
    j1 = -np.ones_like(i12)
    j2 = -np.ones_like(i12)
    # make the connectivity for the non-flats/pits
    j1, j2 = self._mk_connectivity(section, i12, j1, j2)
    j = np.row_stack((j1, j2))
    i = np.row_stack((i12, i12))
    # connectivity for flats/pits
    if self.drain_pits:
        pit_i, pit_j, pit_prop, flats, mag = \
            self._mk_connectivity_pits(i12, flats, elev, mag, dX, dY)
        j = np.concatenate([j.ravel(), pit_j]).astype('int64')
        i = np.concatenate([i.ravel(), pit_i]).astype('int64')
        mat_data = np.concatenate([mat_data.ravel(), pit_prop])
    elif self.drain_flats:
        j1, j2, mat_data, flat_i, flat_j, flat_prop = \
            self._mk_connectivity_flats(
                i12, j1, j2, mat_data, flats, elev, mag)
        j = np.concatenate([j.ravel(), flat_j]).astype('int64')
        i = np.concatenate([i.ravel(), flat_i]).astype('int64')  # was flat_j: i must come from flat_i
        mat_data = np.concatenate([mat_data.ravel(), flat_prop])
    # This prevents no-data values, removes connections when not present,
    # and makes sure that floating point precision errors do not
    # create circular references where a lower elevation cell drains
    # to a higher elevation cell
    I = ~np.isnan(mat_data) & (j != -1) & (mat_data > 1e-8) \
        & (elev.ravel()[j] <= elev.ravel()[i])
    mat_data = mat_data[I]
    j = j[I]
    i = i[I]
    # Make the matrix and initialize
    # What is A? The row i area receives area contributions from the
    # entries in its columns. If all the entries in my columns have
    # drained, then I can drain.
    A = sps.csc_matrix((mat_data.ravel(),
                        np.row_stack((j.ravel(), i.ravel()))),
                       shape=(NN, NN))
    normalize = np.array(A.sum(0) + 1e-16).squeeze()
    A = np.dot(A, sps.diags(1 / normalize, 0))
    return A
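To make the role of this column-normalized sparse matrix concrete, here is a minimal, self-contained sketch (not pydem's API; names are illustrative) for a 3-pixel strip where pixel 0 drains into pixel 1 and pixel 1 drains into pixel 2. Each application of the matrix moves one "hop" of contributing area downstream:

    import numpy as np
    import scipy.sparse as sps

    # A[row, col] = fraction of pixel `col` that drains into pixel `row`
    src = np.array([0, 1])         # draining pixels
    dst = np.array([1, 2])         # pixels receiving the drainage
    prop = np.array([1.0, 1.0])    # full contribution to a single neighbor
    A = sps.csc_matrix((prop, (dst, src)), shape=(3, 3))

    area = np.ones(3)              # every pixel contributes its own area
    area += A * np.ones(3)         # one hop: pixel 1 gets 0's area, 2 gets 1's
    area += A * (A * np.ones(3))   # two hops: pixel 2 also receives 0's area
    print(area)                    # -> [1. 2. 3.]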
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_twi(self): """ Calculates the topographic wetness index and saves the result in self.twi. Returns ------- twi : array Array giving the topographic wetness index at each pixel """
    if self.uca is None:
        self.calc_uca()
        gc.collect()  # Just in case
    min_area = self.twi_min_area
    min_slope = self.twi_min_slope
    twi = self.uca.copy()
    if self.apply_twi_limits_on_uca:
        twi[twi > self.uca_saturation_limit * min_area] = \
            self.uca_saturation_limit * min_area
    gc.collect()  # Just in case
    twi = np.log((twi) / (self.mag + min_slope))
    # apply the cap
    if self.apply_twi_limits:
        twi_sat_value = \
            np.log(self.uca_saturation_limit * min_area / min_slope)
        twi[twi > twi_sat_value] = twi_sat_value
    # multiply by 10 for better integer resolution when storing
    self.twi = twi * 10
    gc.collect()  # Just in case
    return twi
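The quantity computed above is the standard topographic wetness index, TWI = ln(a / tan(beta)), where a is the upstream contributing area and tan(beta) is the local slope magnitude; min_slope merely keeps the denominator nonzero on flats. A minimal numpy rendering of the core formula (array names are hypothetical):

    import numpy as np

    def twi_core(uca, slope_mag, min_slope=1e-4):
        # ln(a / tan(beta)), guarded against division by zero on flat terrain
        return np.log(uca / (slope_mag + min_slope))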
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _plot_debug_slopes_directions(self): """ A debug function to plot the direction calculated in various ways. """
    from matplotlib.pyplot import matshow, colorbar, clim, title
    matshow(self.direction / np.pi * 180); colorbar(); clim(0, 360)
    title('Direction')
    mag2, direction2 = self._central_slopes_directions()
    matshow(direction2 / np.pi * 180.0); colorbar(); clim(0, 360)
    title('Direction (central difference)')
    matshow(self.mag); colorbar()
    title('Magnitude')
    matshow(mag2); colorbar()
    title("Magnitude (Central difference)")
    # Compare to Taudem
    filename = self.file_name
    os.chdir('testtiff')
    try:
        os.remove('test_ang.tif')
        os.remove('test_slp.tif')
    except:
        pass
    cmd = ('dinfflowdir -fel "%s" -ang "%s" -slp "%s"'
           % (os.path.split(filename)[-1], 'test_ang.tif', 'test_slp.tif'))
    taudem._run(cmd)
    td_file = GdalReader(file_name='test_ang.tif')
    td_ang, = td_file.raster_layers
    td_file2 = GdalReader(file_name='test_slp.tif')
    td_mag, = td_file2.raster_layers
    os.chdir('..')
    matshow(td_ang.raster_data / np.pi * 180); clim(0, 360); colorbar()
    title('Taudem direction')
    matshow(td_mag.raster_data); colorbar()
    title('Taudem magnitude')
    matshow(self.data); colorbar()
    title('The test data (elevation)')
    diff = (td_ang.raster_data - self.direction) / np.pi * 180.0
    diff[np.abs(diff) > 300] = np.nan
    matshow(diff); colorbar(); clim([-1, 1])
    title('Taudem direction - calculated Direction')
    # normalize magnitudes
    mag2 = td_mag.raster_data
    mag2 /= np.nanmax(mag2)
    mag = self.mag.copy()
    mag /= np.nanmax(mag)
    matshow(mag - mag2); colorbar()
    title('Taudem magnitude - calculated magnitude')
    del td_file
    del td_file2
    del td_ang
    del td_mag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean(ctx, dry_run=False): """Cleanup generated document artifacts."""
    basedir = ctx.sphinx.destdir or "build/docs"
    cleanup_dirs([basedir], dry_run=dry_run)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build(ctx, builder="html", options=""): """Build docs with sphinx-build"""
    sourcedir = ctx.config.sphinx.sourcedir
    destdir = Path(ctx.config.sphinx.destdir or "build") / builder
    destdir = destdir.abspath()
    with cd(sourcedir):
        destdir_relative = Path(".").relpathto(destdir)
        command = "sphinx-build {opts} -b {builder} {sourcedir} {destdir}" \
            .format(builder=builder, sourcedir=".",
                    destdir=destdir_relative, opts=options)
        ctx.run(command)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def browse(ctx): """Open documentation in web browser."""
    page_html = Path(ctx.config.sphinx.destdir) / "html" / "index.html"
    if not page_html.exists():
        build(ctx, builder="html")
        assert page_html.exists()
    open_cmd = "open"  # -- WORKS ON: MACOSX
    if sys.platform.startswith("win"):
        open_cmd = "start"
    ctx.run("{open} {page_html}".format(open=open_cmd, page_html=page_html))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_neighbors(neighbors, coords, I, source_files, f, sides): """Find the tile neighbors based on filenames Parameters neighbors : dict Dictionary that stores the neighbors. Format is neighbors["source_file_name"]["side"] = "neighbor_source_file_name" coords : list List of coordinates determined from the filename. See :py:func:`utils.parse_fn` I : array Sort index. Different sorting schemes can speed up how quickly neighbors are found source_files : list List of strings of source file names f : callable Function that determines if two tiles are neighbors based on their coordinates. f(c1, c2) returns True if tiles are neighbors sides : list List of 2 strings that give the "side" where tiles are neighbors. Returns ------- neighbors : dict Dictionary of neighbors Notes ------- For example, if Tile1 is to the left of Tile2, then neighbors['Tile1']['right'] = 'Tile2' neighbors['Tile2']['left'] = 'Tile1' """
    for i, c1 in enumerate(coords):
        me = source_files[I[i]]
        # If the left neighbor has already been found...
        if neighbors[me][sides[0]] != '':
            continue
        # could try coords[i:] (+ fixes) for speed if it becomes a problem
        for j, c2 in enumerate(coords):
            if f(c1, c2):  # then the tiles are neighbors
                neigh = source_files[I[j]]
                neighbors[me][sides[0]] = neigh
                neighbors[neigh][sides[1]] = me
                break
    return neighbors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_neighbor_data(self, elev_fn, dem_proc, interp=None): """ From the elevation filename, we can figure out and load the data and done arrays. """
    if interp is None:
        interp = self.build_interpolator(dem_proc)
    opp = {'top': 'bottom', 'left': 'right'}
    for key in self.neighbors[elev_fn].keys():
        tile = self.neighbors[elev_fn][key]
        if tile == '':
            continue
        oppkey = key
        for me, neigh in opp.iteritems():
            if me in key:
                oppkey = oppkey.replace(me, neigh)
            else:
                oppkey = oppkey.replace(neigh, me)
        opp_edge = self.neighbors[tile][oppkey]
        if opp_edge == '':
            continue
        interp.values = dem_proc.uca[::-1, :]
        # interp.values[:, 0] = np.ravel(dem_proc.uca)  # for other interp.
        # for the top-left tile we have to set the bottom and right edges
        # of that tile, so two edges for those tiles
        for key_ed in oppkey.split('-'):
            self.edges[tile][key_ed].set_data('data', interp)
        interp.values = dem_proc.edge_done[::-1, :].astype(float)
        # interp.values[:, 0] = np.ravel(dem_proc.edge_done)
        for key_ed in oppkey.split('-'):
            self.edges[tile][key_ed].set_data('done', interp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_edge_todo(self, elev_fn, dem_proc): """ Updates the edge 'todo' data for the tile given by the elevation filename """
    for key in self.edges[elev_fn].keys():
        self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_edges(self, elev_fn, dem_proc): """ After finishing a calculation, this will update the neighbors and the todo for that tile """
    interp = self.build_interpolator(dem_proc)
    self.update_edge_todo(elev_fn, dem_proc)
    self.set_neighbor_data(elev_fn, dem_proc, interp)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_edge_init_data(self, fn, save_path=None): """ Creates the initialization data from the edge structure """
    edge_init_data = {key: self.edges[fn][key].get('data')
                      for key in self.edges[fn].keys()}
    edge_init_done = {key: self.edges[fn][key].get('done')
                      for key in self.edges[fn].keys()}
    edge_init_todo = {key: self.edges[fn][key].get('todo')
                      for key in self.edges[fn].keys()}
    return edge_init_data, edge_init_done, edge_init_todo
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_best_candidate(self, elev_source_files=None): """ Heuristically determines which tile should be recalculated based on updated edge information. Presently does not check if that tile is locked, which could lead to a parallel thread closing while one thread continues to process tiles. """
    self.fill_percent_done()
    i_b = np.argmax(self.percent_done.values())
    if self.percent_done.values()[i_b] <= 0:
        return None
    # check for ties
    I = np.array(self.percent_done.values()) == \
        self.percent_done.values()[i_b]
    if I.sum() == 1:
        pass  # no ties
    else:
        I2 = np.argmax(np.array(self.max_elev.values())[I])
        i_b = I.nonzero()[0][I2]
        # Make sure the apples are still apples
        assert(np.array(self.max_elev.keys())[I][I2]
               == np.array(self.percent_done.keys())[I][I2])
    if elev_source_files is not None:
        fn = self.percent_done.keys()[i_b]
        lckfn = _get_lockfile_name(fn)
        if os.path.exists(lckfn):  # another process is working on it
            # Find a different Candidate
            i_alt = np.argsort(self.percent_done.values())[::-1]
            for i in i_alt:
                fn = self.percent_done.keys()[i]
                lckfn = _get_lockfile_name(fn)
                if not os.path.exists(lckfn):
                    break
        # Get and return the index
        i_b = elev_source_files.index(fn)
    return i_b
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_command(self, command, save_name='custom', index=None): """ Processes the hillshading Parameters index : int/slice (optional) Default: None - process all tiles in source directory. Otherwise, will only process the index/indices of the files as listed in self.elev_source_files """
    if index is not None:
        elev_source_files = [self.elev_source_files[index]]
    else:
        elev_source_files = self.elev_source_files
    save_root = os.path.join(self.save_path, save_name)
    if not os.path.exists(save_root):
        os.makedirs(save_root)
    for i, esfile in enumerate(elev_source_files):
        try:
            status = 'Success'  # optimism
            # Check if file is locked
            lckfn = _get_lockfile_name(esfile)
            coords = parse_fn(esfile)
            fn = get_fn_from_coords(coords, save_name)
            fn = os.path.join(save_root, fn)
            if os.path.exists(lckfn):  # another process is working on it
                print fn, 'is locked'
                status = 'locked'
            elif os.path.exists(fn):
                print fn, 'already exists'
                status = 'cached'
            else:  # lock this tile
                print fn, '... calculating ', save_name
                fid = file(lckfn, 'w')
                fid.close()
                # Calculate the custom process for this tile
                status = command(esfile, fn)
                os.remove(lckfn)
            if index is None:
                self.custom_status[i] = status
            else:
                self.custom_status[index] = status
        except:
            lckfn = _get_lockfile_name(esfile)
            try:
                os.remove(lckfn)
            except:
                pass
            traceback.print_exc()
            print traceback.format_exc()
            if index is None:
                self.custom_status[i] = "Error " + traceback.format_exc()
            else:
                self.custom_status[index] = "Error " + traceback.format_exc()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rename_files(files, name=None): """ Given a list of file paths for elevation files, this function will rename those files to the format required by the pyDEM package. This assumes a .tif extension. Parameters files : list A list of strings of the paths to the elevation files that will be renamed name : str (optional) Default = None. A suffix to the filename. For example <filename>_suffix.tif Notes ------ The files are renamed in the same directory as the original file locations """
    for fil in files:
        elev_file = GdalReader(file_name=fil)
        elev, = elev_file.raster_layers
        fn = get_fn(elev, name)
        del elev_file
        del elev
        fn = os.path.join(os.path.split(fil)[0], fn)
        os.rename(fil, fn)
        print "Renamed", fil, "to", fn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_fn(fn): """ This parses the file name and returns the coordinates of the tile Parameters fn : str Filename of a GEOTIFF Returns -------- coords = [LLC.lat, LLC.lon, URC.lat, URC.lon] """
    try:
        parts = os.path.splitext(os.path.split(fn)[-1])[0].replace('o', '.')\
            .split('_')[:2]
        coords = [float(crds)
                  for crds in re.split('[NSEW]', parts[0] + parts[1])[1:]]
    except:
        coords = [np.nan] * 4
    return coords
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_fn(elev, name=None): """ Determines the standard filename for a given GeoTIFF Layer. Parameters elev : GdalReader.raster_layer A raster layer from the GdalReader object. name : str (optional) An optional suffix to the filename. Returns ------- fn : str The standard <filename>_<name>.tif with suffix (if supplied) """
    gcs = elev.grid_coordinates
    coords = [gcs.LLC.lat, gcs.LLC.lon, gcs.URC.lat, gcs.URC.lon]
    return get_fn_from_coords(coords, name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_fn_from_coords(coords, name=None): """ Given a set of coordinates, returns the standard filename. Parameters coords : list [LLC.lat, LLC.lon, URC.lat, URC.lon] name : str (optional) An optional suffix to the filename. Returns ------- fn : str The standard <filename>_<name>.tif with suffix (if supplied) """
    NS1 = ["S", "N"][coords[0] > 0]
    EW1 = ["W", "E"][coords[1] > 0]
    NS2 = ["S", "N"][coords[2] > 0]
    EW2 = ["W", "E"][coords[3] > 0]
    new_name = "%s%0.3g%s%0.3g_%s%0.3g%s%0.3g" % \
        (NS1, coords[0], EW1, coords[1], NS2, coords[2], EW2, coords[3])
    if name is not None:
        new_name += '_' + name
    return new_name.replace('.', 'o') + '.tif'
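A concrete round trip through get_fn_from_coords and parse_fn (coordinate values chosen arbitrarily; note how decimal points are encoded as 'o' so the filename stays portable):

    >>> get_fn_from_coords([35.5, -99, 36, -98.5], 'twi')
    'N35o5W-99_N36W-98o5_twi.tif'
    >>> parse_fn('N35o5W-99_N36W-98o5_twi.tif')
    [35.5, -99.0, 36.0, -98.5]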
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mk_dx_dy_from_geotif_layer(geotif): """ Extracts the change in x and y coordinates from the geotiff file. Presently only supports WGS-84 files. """
    ELLIPSOID_MAP = {'WGS84': 'WGS-84'}
    ellipsoid = ELLIPSOID_MAP[geotif.grid_coordinates.wkt]
    d = distance(ellipsoid=ellipsoid)
    dx = geotif.grid_coordinates.x_axis
    dy = geotif.grid_coordinates.y_axis
    dX = np.zeros((dy.shape[0] - 1))
    for j in xrange(len(dX)):
        dX[j] = d.measure((dy[j+1], dx[1]), (dy[j+1], dx[0])) * 1000  # km2m
    dY = np.zeros((dy.shape[0] - 1))
    for i in xrange(len(dY)):
        dY[i] = d.measure((dy[i], 0), (dy[i+1], 0)) * 1000  # km2m
    return dX, dY
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mk_geotiff_obj(raster, fn, bands=1, gdal_data_type=gdal.GDT_Float32, lat=[46, 45], lon=[-73, -72]): """ Creates a new geotiff file object using the WGS84 coordinate system, saves it to disk, and returns a handle to the python file object and driver Parameters raster : array Numpy array of the raster data to be added to the object fn : str Name of the geotiff file bands : int (optional) Default 1. Number of raster bands; see gdal.GetDriverByName('GTiff').Create gdal_data_type : gdal.GDT_<type> (optional) Default gdal.GDT_Float32. GDAL data type of the raster. lat : list [northern lat, southern lat] lon : list [western lon, eastern lon] """
    NNi, NNj = raster.shape
    driver = gdal.GetDriverByName('GTiff')
    obj = driver.Create(fn, NNj, NNi, bands, gdal_data_type)
    pixel_height = -np.abs(lat[0] - lat[1]) / (NNi - 1.0)
    pixel_width = np.abs(lon[0] - lon[1]) / (NNj - 1.0)
    obj.SetGeoTransform([lon[0], pixel_width, 0, lat[0], 0, pixel_height])
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS('WGS84')
    obj.SetProjection(srs.ExportToWkt())
    obj.GetRasterBand(1).WriteArray(raster)
    return obj, driver
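A brief usage sketch (the array contents and file name are arbitrary); closing the dataset handle is what finalizes the file on disk:

    import numpy as np

    raster = np.random.rand(100, 200).astype('float32')
    obj, driver = mk_geotiff_obj(raster, 'example.tif',
                                 lat=[46, 45], lon=[-73, -72])
    obj.FlushCache()  # flush the written band to disk
    obj = None        # release the GDAL dataset handle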
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sortrows(a, i=0, index_out=False, recurse=True): """ Sorts array "a" by column i Parameters a : np.ndarray array to be sorted i : int (optional) column to be sorted by, taken as 0 by default index_out : bool (optional) return the index I such that a(I) = sortrows(a,i). Default = False recurse : bool (optional) recursively sort by each of the columns. i.e. once column i is sorted, we sort by the next column, etc. True by default. Returns -------- a : np.ndarray The array 'a' sorted in ascending order by column i I : np.ndarray (optional) The index such that a[I, :] = sortrows(a, i). Only returned if index_out = True Examples ---------
    >>> a = np.array([[1, 2], [2, 3], [3, 1]])
    >>> b, I = sortrows(a, 1, True)
    >>> b
    array([[3, 1],
           [1, 2],
           [2, 3]])
    >>> I
    array([2, 0, 1])
    """
    I = np.argsort(a[:, i])
    a = a[I, :]
    # We recursively call sortrows to make sure it is sorted best by every
    # column
    if recurse & (len(a[0]) > i + 1):
        for b in np.unique(a[:, i]):
            ids = a[:, i] == b
            colids = range(i) + range(i + 1, len(a[0]))
            a[np.ix_(ids, colids)], I2 = sortrows(a[np.ix_(ids, colids)],
                                                  0, True, True)
            I[ids] = I[np.nonzero(ids)[0][I2]]
    if index_out:
        return a, I
    else:
        return a
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_border_index(I, shape, size): """ Get flattened indices for the border of the region I. Parameters I : np.ndarray(dtype=int) indices in the flattened region. size : int region size (technically computable from shape argument) shape : tuple(int, int) region shape Returns ------- J : np.ndarray(dtype=int) indices orthogonally and diagonally bordering I """
    J = get_adjacent_index(I, shape, size)
    # instead of setdiff?
    # border = np.zeros(size)
    # border[J] = 1
    # border[I] = 0
    # J, = np.where(border)
    return np.setdiff1d(J, I)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_border_mask(region): """ Get border of the region as a boolean array mask. Parameters region : np.ndarray(shape=(m, n), dtype=bool) mask of the region Returns ------- border : np.ndarray(shape=(m, n), dtype=bool) mask of the region border (not including region) """
    # common special case (for efficiency)
    internal = region[1:-1, 1:-1]
    if internal.all() and internal.any():
        return ~region
    I, = np.where(region.ravel())
    J = get_adjacent_index(I, region.shape, region.size)
    border = np.zeros(region.size, dtype='bool')
    border[J] = 1
    border[I] = 0
    border = border.reshape(region.shape)
    return border
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_distance(region, src): """ Compute within-region distances from the src pixels. Parameters region : np.ndarray(shape=(m, n), dtype=bool) mask of the region src : np.ndarray(shape=(m, n), dtype=bool) mask of the source pixels to compute distances from. Returns ------- d : np.ndarray(shape=(m, n), dtype=float) approximate within-region distance from the nearest src pixel; (distances outside of the region are arbitrary). """
    dmax = float(region.size)
    d = np.full(region.shape, dmax)
    d[src] = 0
    for n in range(region.size):
        d_orth = minimum_filter(d, footprint=_ORTH2) + 1
        d_diag = minimum_filter(d, (3, 3)) + _SQRT2
        d_adj = np.minimum(d_orth[region], d_diag[region])
        d[region] = np.minimum(d_adj, d[region])
        if (d[region] < dmax).all():
            break
    return d
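A self-contained illustration, assuming the module constants _ORTH2 and _SQRT2 are a plus-shaped footprint and sqrt(2) (their definitions are not shown in this snippet):

    import numpy as np
    from scipy.ndimage import minimum_filter

    _ORTH2 = np.array([[0, 1, 0],
                       [1, 1, 1],
                       [0, 1, 0]], dtype=bool)  # orthogonal steps cost 1
    _SQRT2 = np.sqrt(2.0)                       # diagonal steps cost sqrt(2)

    region = np.ones((5, 5), dtype=bool)
    src = np.zeros((5, 5), dtype=bool)
    src[2, 2] = True  # single source pixel in the center

    d = get_distance(region, src)
    print(d[2])  # -> [2. 1. 0. 1. 2.] along the row through the source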
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def grow_slice(slc, size): """ Grow a slice object by 1 in each direction without overreaching the list. Parameters slc: slice slice object to grow size: int list length Returns ------- slc: slice extended slice """
return slice(max(0, slc.start-1), min(size, slc.stop+1))
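For example:

    >>> grow_slice(slice(3, 5), 6)
    slice(2, 6, None)
    >>> grow_slice(slice(0, 5), 6)  # already clipped at the left boundary
    slice(0, 6, None)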
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_edge(obj, shape): """ Check if a 2d object is on the edge of the array. Parameters obj : tuple(slice, slice) Pair of slices (e.g. from scipy.ndimage.measurements.find_objects) shape : tuple(int, int) Array shape. Returns ------- b : boolean True if the object touches any edge of the array, else False. """
    if obj[0].start == 0:
        return True
    if obj[1].start == 0:
        return True
    if obj[0].stop == shape[0]:
        return True
    if obj[1].stop == shape[1]:
        return True
    return False
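For example, with scipy-style object slices on a 10x10 array:

    >>> is_edge((slice(0, 3), slice(2, 4)), (10, 10))  # touches the top row
    True
    >>> is_edge((slice(4, 6), slice(2, 4)), (10, 10))  # fully interior
    False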
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pop_chunk(self, chunk_max_size): """Pops a chunk of the given max size. Optimized to avoid too much string copies. Args: chunk_max_size (int): max size of the returned chunk. Returns: string (bytes) with a size <= chunk_max_size. """
    if self._total_length < chunk_max_size:
        # fastpath (the whole queue fits in a single chunk)
        res = self._tobytes()
        self.clear()
        return res
    first_iteration = True
    while True:
        try:
            data = self._deque.popleft()
            data_length = len(data)
            self._total_length -= data_length
            if first_iteration:
                # first iteration
                if data_length == chunk_max_size:
                    # we are lucky !
                    return data
                elif data_length > chunk_max_size:
                    # we have enough data at first iteration
                    # => fast path optimization
                    view = self._get_pointer_or_memoryview(data,
                                                           data_length)
                    self.appendleft(view[chunk_max_size:])
                    return view[:chunk_max_size]
                else:
                    # no single iteration fast path optimization :-(
                    # let's use a WriteBuffer to build the result chunk
                    chunk_write_buffer = WriteBuffer()
            else:
                # not first iteration
                if chunk_write_buffer._total_length + data_length \
                        > chunk_max_size:
                    view = self._get_pointer_or_memoryview(data,
                                                           data_length)
                    limit = chunk_max_size - \
                        chunk_write_buffer._total_length - data_length
                    self.appendleft(view[limit:])
                    data = view[:limit]
            chunk_write_buffer.append(data)
            if chunk_write_buffer._total_length >= chunk_max_size:
                break
        except IndexError:
            # the buffer is empty (so no memoryview inside)
            self._has_view = False
            break
        first_iteration = False
    return chunk_write_buffer._tobytes()
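A usage sketch of the first-iteration fast path (assuming the buffer was filled with the WriteBuffer.append method):

    buf = WriteBuffer()
    buf.append(b"foobar")
    buf.append(b"x")
    chunk = buf.pop_chunk(4)  # -> b"foob" (possibly as a memoryview)
    # b"ar" is pushed back on the left, so b"arx" remains queued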
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def absolute(self): """Return an absolute version of this path. This function works even if the path doesn't point to anything. No normalization is done, i.e. all '.' and '..' will be kept along. Use resolve() to get the canonical path to a file. """
    # XXX untested yet!
    if self.is_absolute():
        return self
    # FIXME this must defer to the specific flavour (and, under Windows,
    # use nt._getfullpathname())
    obj = self._from_parts([os.getcwd()] + self._parts, init=False)
    obj._init(template=self)
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_symlink(self): """ Whether this path is a symbolic link. """
    try:
        return S_ISLNK(self.lstat().st_mode)
    except OSError as e:
        if e.errno != ENOENT:
            raise
        # Path doesn't exist
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_block_device(self): """ Whether this path is a block device. """
    try:
        return S_ISBLK(self.stat().st_mode)
    except OSError as e:
        if e.errno != ENOENT:
            raise
        # Path doesn't exist or is a broken symlink
        # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_char_device(self): """ Whether this path is a character device. """
    try:
        return S_ISCHR(self.stat().st_mode)
    except OSError as e:
        if e.errno != ENOENT:
            raise
        # Path doesn't exist or is a broken symlink
        # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def grid_coords_from_corners(upper_left_corner, lower_right_corner, size):
    '''
    Points are the outer edges of the UL and LR pixels. Size is rows,
    columns. GC projection type is taken from Points.
    '''
    assert upper_left_corner.wkt == lower_right_corner.wkt
    geotransform = np.array([
        upper_left_corner.lon,
        -(upper_left_corner.lon - lower_right_corner.lon) / float(size[1]),
        0,
        upper_left_corner.lat,
        0,
        -(upper_left_corner.lat - lower_right_corner.lat) / float(size[0])])
    return GridCoordinates(geotransform=geotransform,
                           wkt=upper_left_corner.wkt,
                           y_size=size[0], x_size=size[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def intersects(self, other_grid_coordinates): """ returns True if the GC's overlap. """
    ogc = other_grid_coordinates  # alias
    # for explanation: http://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
    # Note the flipped y-coord in this coord system.
    ax1, ay1, ax2, ay2 = self.ULC.lon, self.ULC.lat, self.LRC.lon, self.LRC.lat
    bx1, by1, bx2, by2 = ogc.ULC.lon, ogc.ULC.lat, ogc.LRC.lon, ogc.LRC.lat
    if ((ax1 <= bx2) and (ax2 >= bx1) and (ay1 >= by2) and (ay2 <= by1)):
        return True
    else:
        return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def raster_to_projection_coords(self, pixel_x, pixel_y): """ Use pixel centers when appropriate. See documentation for the GDAL function GetGeoTransform for details. """
    h_px_py = np.array([1, pixel_x, pixel_y])
    gt = np.array([[1, 0, 0],
                   self.geotransform[0:3],
                   self.geotransform[3:6]])
    arr = np.inner(gt, h_px_py)
    return arr[2], arr[1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def projection_to_raster_coords(self, lat, lon): """ Returns pixel centers. See documentation for the GDAL function GetGeoTransform for details. """
    r_px_py = np.array([1, lon, lat])
    tg = inv(np.array([[1, 0, 0],
                       self.geotransform[0:3],
                       self.geotransform[3:6]]))
    return np.inner(tg, r_px_py)[1:]
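Both methods are affine maps built from the 6-element GDAL geotransform, written in homogeneous form; projection_to_raster_coords simply applies the inverse matrix. A standalone sketch with a hypothetical geotransform (origin lon -73, lat 46, 0.01-degree pixels):

    import numpy as np
    from numpy.linalg import inv

    gt = [-73.0, 0.01, 0.0, 46.0, 0.0, -0.01]  # GDAL order: x0, dx, 0, y0, 0, dy

    A = np.array([[1, 0, 0], gt[0:3], gt[3:6]])
    lat, lon = np.inner(A, [1, 10, 20])[[2, 1]]   # pixel (10, 20)
    print(lat, lon)                               # -> 45.8 -72.9

    px, py = np.inner(inv(A), [1, lon, lat])[1:]  # back to pixel coords
    print(round(px), round(py))                   # -> 10 20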
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reproject_to_grid_coordinates(self, grid_coordinates, interp=gdalconst.GRA_NearestNeighbour): """ Reprojects data in this layer to match that in the GridCoordinates object. """
    source_dataset = self.grid_coordinates._as_gdal_dataset()
    dest_dataset = grid_coordinates._as_gdal_dataset()
    rb = source_dataset.GetRasterBand(1)
    rb.SetNoDataValue(NO_DATA_VALUE)
    rb.WriteArray(np.ma.filled(self.raster_data, NO_DATA_VALUE))
    gdal.ReprojectImage(source_dataset, dest_dataset,
                        source_dataset.GetProjection(),
                        dest_dataset.GetProjection(), interp)
    dest_layer = self.clone_traits()
    dest_layer.grid_coordinates = grid_coordinates
    rb = dest_dataset.GetRasterBand(1)
    dest_layer.raster_data = np.ma.masked_values(rb.ReadAsArray(),
                                                 NO_DATA_VALUE)
    return dest_layer
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inpaint(self): """ Replace masked-out elements in an array using an iterative image inpainting algorithm. """
    import inpaint
    filled = inpaint.replace_nans(
        np.ma.filled(self.raster_data, np.NAN).astype(np.float32),
        3, 0.01, 2)
    self.raster_data = np.ma.masked_invalid(filled)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_connected_client(self): """Gets a connected Client object. If max_size is reached, this method will block until a new client object is available. Returns: A Future object with connected Client instance as a result (or ClientError if there was a connection problem) """
    if self.__sem is not None:
        yield self.__sem.acquire()
    client = None
    newly_created, client = self._get_client_from_pool_or_make_it()
    if newly_created:
        res = yield client.connect()
        if not res:
            LOG.warning("can't connect to %s", client.title)
            raise tornado.gen.Return(
                ClientError("can't connect to %s" % client.title))
    raise tornado.gen.Return(client)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connected_client(self): """Returns a ContextManagerFuture to be yielded in a with statement. Returns: A ContextManagerFuture object. Examples:
        with (yield pool.connected_client()) as client:
            # client is a connected tornadis.Client instance
            # it will be automatically released to the pool thanks to
            # the "with" keyword
            reply = yield client.call("PING")
    """
    future = self.get_connected_client()
    cb = functools.partial(self._connected_client_release_cb, future)
    return ContextManagerFuture(future, cb)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def release_client(self, client): """Releases a client object to the pool. Args: client: Client object. """
    if isinstance(client, Client):
        if not self._is_expired_client(client):
            LOG.debug('Client is not expired. Adding back to pool')
            self.__pool.append(client)
        elif client.is_connected():
            LOG.debug('Client is expired and connected. Disconnecting')
            client.disconnect()
    if self.__sem is not None:
        self.__sem.release()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def destroy(self): """Disconnects all pooled client objects."""
    while True:
        try:
            client = self.__pool.popleft()
            if isinstance(client, Client):
                client.disconnect()
        except IndexError:
            break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select_params_from_section_schema(section_schema, param_class=Param, deep=False): """Selects the parameters of a config section schema. :param section_schema: Configuration file section schema to use. :return: Generator of params """
    # pylint: disable=invalid-name
    for name, value in inspect.getmembers(section_schema):
        if name.startswith("__") or value is None:
            continue  # pragma: no cover
        elif inspect.isclass(value) and deep:
            # -- CASE: class => SELF-CALL (recursively).
            # pylint: disable=bad-continuation
            cls = value
            for name, value in select_params_from_section_schema(
                    cls, param_class=param_class, deep=True):
                yield (name, value)
        elif isinstance(value, param_class):
            yield (name, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_configfile_names(config_files, config_searchpath=None): """Generates all configuration file name combinations to read. .. sourcecode:: python
        # -- ALGORITHM:
        # First basenames/directories are preferred and override other files.
        for config_path in reversed(config_searchpath):
            for config_basename in reversed(config_files):
                config_fname = os.path.join(config_path, config_basename)
                if os.path.isfile(config_fname):
                    yield config_fname

    :param config_files: List of config file basenames. :param config_searchpath: List of directories to look for config files. :return: List of available configuration file names (as generator) """
    if config_searchpath is None:
        config_searchpath = ["."]
    for config_path in reversed(config_searchpath):
        for config_basename in reversed(config_files):
            config_fname = os.path.join(config_path, config_basename)
            if os.path.isfile(config_fname):
                # MAYBE: yield os.path.normpath(config_fname)
                yield config_fname
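A usage sketch with hypothetical file names; because both lists are walked in reverse, later search-path entries and later basenames take precedence:

    config_files = ["setup.cfg", "tox.ini"]
    for fname in generate_configfile_names(config_files,
                                           config_searchpath=[".", "config"]):
        print(fname)  # e.g. "config/tox.ini" first, if that file exists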
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def matches_section(cls, section_name, supported_section_names=None): """Indicates if this schema can be used for a config section by using the section name. :param section_name: Config section name to check. :return: True, if this schema can be applied to the config section. :return: False, if this schema does not match the config section. """
    if supported_section_names is None:
        supported_section_names = getattr(cls, "section_names", None)
    # pylint: disable=invalid-name
    for supported_section_name_or_pattern in supported_section_names:
        if fnmatch(section_name, supported_section_name_or_pattern):
            return True
    # -- OTHERWISE:
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cleanup_files(patterns, dry_run=False, workdir="."): """Remove files or files selected by file patterns. Skips removal if file does not exist. :param patterns: File patterns, like "**/*.pyc" (as list). :param dry_run: Dry-run mode indicator (as bool). :param workdir: Current work directory (default=".") """
    current_dir = Path(workdir)
    python_basedir = Path(Path(sys.executable).dirname())\
        .joinpath("..").abspath()
    error_message = None
    error_count = 0
    for file_pattern in patterns:
        for file_ in path_glob(file_pattern, current_dir):
            if file_.abspath().startswith(python_basedir):
                # -- PROTECT CURRENTLY USED VIRTUAL ENVIRONMENT:
                continue
            if dry_run:
                print("REMOVE: %s (dry-run)" % file_)
            else:
                print("REMOVE: %s" % file_)
                try:
                    file_.remove_p()
                except os.error as e:
                    message = "%s: %s" % (e.__class__.__name__, e)
                    print(message + " basedir: " + python_basedir)
                    error_count += 1
                    if not error_message:
                        error_message = message
    if False and error_message:
        class CleanupError(RuntimeError):
            pass
        raise CleanupError(error_message)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stack_call(self, *args): """Stacks a redis command inside the object. The syntax is the same as the call() method of the Client class. Args: *args: full redis command as variable length argument list. Examples: """
    self.pipelined_args.append(args)
    self.number_of_stacked_calls = self.number_of_stacked_calls + 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disconnect(self): """Disconnects the object. Safe method (no exception, even if it's already disconnected or if there are some connection errors). """
    if not self.is_connected() and not self.is_connecting():
        return
    LOG.debug("disconnecting from %s...", self._redis_server())
    self.__periodic_callback.stop()
    try:
        self._ioloop.remove_handler(self.__socket_fileno)
        self._listened_events = 0
    except Exception:
        pass
    self.__socket_fileno = -1
    try:
        self.__socket.close()
    except Exception:
        pass
    self._state.set_disconnected()
    self._close_callback()
    LOG.debug("disconnected from %s", self._redis_server())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def surrogate_escape(error): """ Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only. """
    chars = error.object[error.start:error.end]
    assert len(chars) == 1
    val = ord(chars)
    val += 0xdc00
    return __builtin__.unichr(val), error.end
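The handler is meant to be registered with the codecs machinery under the 'surrogateescape' name; a short Python 2 usage sketch:

    import codecs

    codecs.register_error('surrogateescape', surrogate_escape)
    # An undecodable byte now maps into the low surrogate range
    # instead of raising UnicodeDecodeError:
    u = '\xff'.decode('utf-8', 'surrogateescape')  # -> u'\udcff'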
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __gdal_dataset_default(self): """DiskReader implementation."""
    if not os.path.exists(self.file_name):
        return None
    if os.path.splitext(self.file_name)[1].lower() not in self.file_types:
        raise RuntimeError('Filename %s does not have extension type %s.'
                           % (self.file_name, self.file_types))
    dataset = gdal.OpenShared(self.file_name, gdalconst.GA_ReadOnly)
    if dataset is None:
        raise ValueError('Dataset %s did not load properly.'
                         % self.file_name)
    # Sanity checks.
    assert dataset.RasterCount > 0
    # Seems okay...
    return dataset
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(self): """Connects the client object to redis. It's safe to use this method even if you are already connected. Note: this method is useless with autoconnect mode (default). Returns: a Future object with True as result if the connection was ok. """
    if self.is_connected():
        raise tornado.gen.Return(True)
    cb1 = self._read_callback
    cb2 = self._close_callback
    self.__callback_queue = collections.deque()
    self._reply_list = []
    self.__reader = hiredis.Reader(replyError=ClientError)
    kwargs = self.connection_kwargs
    self.__connection = Connection(cb1, cb2, **kwargs)
    connection_status = yield self.__connection.connect()
    if connection_status is not True:
        # nothing left to do here, return
        raise tornado.gen.Return(False)
    if self.password is not None:
        authentication_status = yield self._call('AUTH', self.password)
        if authentication_status != b'OK':
            # incorrect password, return back the result
            LOG.warning("impossible to connect: bad password")
            self.__connection.disconnect()
            raise tornado.gen.Return(False)
    if self.db != 0:
        db_status = yield self._call('SELECT', self.db)
        if db_status != b'OK':
            LOG.warning("can't select db %s", self.db)
            raise tornado.gen.Return(False)
    raise tornado.gen.Return(True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _close_callback(self): """Callback called when redis closed the connection. The callback queue is emptied and we call each callback found with None or with an exception object to wake up blocked client. """
    while True:
        try:
            callback = self.__callback_queue.popleft()
            callback(ConnectionError("closed connection"))
        except IndexError:
            break
    if self.subscribed:
        # pubsub clients
        self._reply_list.append(ConnectionError("closed connection"))
        self._condition.notify_all()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_callback(self, data=None): """Callback called when some data are read on the socket. The buffer is given to the hiredis parser. If a reply is complete, we put the decoded reply on the reply queue. Args: data (str): string (buffer) read on the socket. """
    try:
        if data is not None:
            self.__reader.feed(data)
        while True:
            reply = self.__reader.gets()
            if reply is not False:
                try:
                    callback = self.__callback_queue.popleft()
                    # normal client (1 reply = 1 callback)
                    callback(reply)
                except IndexError:
                    # pubsub clients
                    self._reply_list.append(reply)
                    self._condition.notify_all()
            else:
                break
    except hiredis.ProtocolError:
        # something nasty occurred (corrupt stream => no way to recover)
        LOG.warning("corrupted stream => disconnect")
        self.disconnect()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def call(self, *args, **kwargs): """Calls a redis command and returns a Future of the reply. Args: *args: full redis command as variable length argument list or a Pipeline object (as a single argument). **kwargs: internal private options (do not use). Returns: a Future with the decoded redis reply as result (when available) or a ConnectionError object in case of connection error. Raises: ClientError: your Pipeline object is empty. Examples:
        @tornado.gen.coroutine
        def foobar():
            client = Client()
            result = yield client.call("HSET", "key", "field", "val")
    """
    if not self.is_connected():
        if self.autoconnect:
            # We use this method only when we are not connected
            # to avoid a performance penalty due to the gen.coroutine
            # decorator
            return self._call_with_autoconnect(*args, **kwargs)
        else:
            error = ConnectionError("you are not connected and "
                                    "autoconnect=False")
            return tornado.gen.maybe_future(error)
    return self._call(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def async_call(self, *args, **kwargs): """Calls a redis command, waits for the reply and call a callback. Following options are available (not part of the redis command itself): - callback Function called (with the result as argument) when the result is available. If not set, the reply is silently discarded. In case of errors, the callback is called with a TornadisException object as argument. Args: *args: full redis command as variable length argument list or a Pipeline object (as a single argument). **kwargs: options as keyword parameters. Examples: pass """
    def after_autoconnect_callback(future):
        if self.is_connected():
            self._call(*args, **kwargs)
        else:
            # FIXME
            pass
    if 'callback' not in kwargs:
        kwargs['callback'] = discard_reply_cb
    if not self.is_connected():
        if self.autoconnect:
            connect_future = self.connect()
            cb = after_autoconnect_callback
            self.__connection._ioloop.add_future(connect_future, cb)
        else:
            error = ConnectionError("you are not connected and "
                                    "autoconnect=False")
            kwargs['callback'](error)
    else:
        self._call(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_args_in_redis_protocol(*args): """This function makes and returns a string/buffer corresponding to given arguments formatted with the redis protocol. integer, text, string or binary types are automatically converted (using utf8 if necessary). More information about the protocol: http://redis.io/topics/protocol Args: *args: full redis command as variable length argument list Returns: binary string (arguments in redis protocol) Examples: '*4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n' """
buf = WriteBuffer() l = "*%d\r\n" % len(args) # noqa: E741 if six.PY2: buf.append(l) else: # pragma: no cover buf.append(l.encode('utf-8')) for arg in args: if isinstance(arg, six.text_type): # it's a unicode string in Python2 or a standard (unicode) # string in Python3, let's encode it in utf-8 to get raw bytes arg = arg.encode('utf-8') elif isinstance(arg, six.string_types): # it's a basestring in Python2 => nothing to do pass elif isinstance(arg, six.binary_type): # pragma: no cover # it's a raw bytes string in Python3 => nothing to do pass elif isinstance(arg, six.integer_types): tmp = "%d" % arg if six.PY2: arg = tmp else: # pragma: no cover arg = tmp.encode('utf-8') elif isinstance(arg, WriteBuffer): # it's a WriteBuffer object => nothing to do pass else: raise Exception("don't know what to do with %s" % type(arg)) l = "$%d\r\n" % len(arg) # noqa: E741 if six.PY2: buf.append(l) else: # pragma: no cover buf.append(l.encode('utf-8')) buf.append(arg) buf.append(b"\r\n") return buf
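To sanity-check the encoding above, the same RESP frame can be built with plain bytes; this is a standalone sketch using only the standard library, and resp_encode is a hypothetical name, not part of the module:

def resp_encode(*args):
    # Encode a command as a RESP array of bulk strings.
    out = [b"*%d\r\n" % len(args)]
    for arg in args:
        if isinstance(arg, str):
            arg = arg.encode("utf-8")
        elif isinstance(arg, int):
            arg = str(arg).encode("utf-8")
        out.append(b"$%d\r\n" % len(arg))
        out.append(arg)
        out.append(b"\r\n")
    return b"".join(out)

# Matches the docstring example above.
assert resp_encode("HSET", "key", "field", "value") == \
    b"*4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n"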
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _done_callback(self, wrapped): """Internal "done callback" to set the result of the object. The result of the object is forced by the wrapped future, so this internal callback must be called when the wrapped future is ready. Args: wrapped (Future): the wrapped Future object """
if wrapped.exception(): self.set_exception(wrapped.exception()) else: self.set_result(wrapped.result())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def result(self): """The result method which returns a context manager Returns: ContextManager: The corresponding context manager """
if self.exception(): raise self.exception() # Otherwise return a context manager that cleans up after the block. @contextlib.contextmanager def f(): try: yield self._wrapped.result() finally: self._exit_callback() return f()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_url(artist, song): """Create the URL in the LyricWikia format"""
return (__BASE_URL__ + '/wiki/{artist}:{song}'.format(artist=urlize(artist), song=urlize(song)))
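Assuming urlize replaces spaces with underscores (the LyricWikia page-name convention) and __BASE_URL__ points at the wiki host, the resulting URL looks like this; both definitions below are illustrative stand-ins, not the package's own:

__BASE_URL__ = 'http://lyrics.wikia.com'  # assumed value

def urlize(string):
    # Hypothetical stand-in: LyricWikia page names use underscores.
    return string.replace(' ', '_')

url = __BASE_URL__ + '/wiki/{artist}:{song}'.format(
    artist=urlize('Led Zeppelin'), song=urlize('Stairway to Heaven'))
print(url)  # http://lyrics.wikia.com/wiki/Led_Zeppelin:Stairway_to_Heaven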
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_lyrics(artist, song, linesep='\n', timeout=None): """Retrieve the lyrics of the song and return the first one in case multiple versions are available."""
return get_all_lyrics(artist, song, linesep, timeout)[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all_lyrics(artist, song, linesep='\n', timeout=None): """Retrieve a list of all the lyrics versions of a song."""
url = create_url(artist, song) response = _requests.get(url, timeout=timeout) soup = _BeautifulSoup(response.content, "html.parser") lyricboxes = soup.findAll('div', {'class': 'lyricbox'}) if not lyricboxes: raise LyricsNotFound('Cannot download lyrics') for lyricbox in lyricboxes: for br in lyricbox.findAll('br'): br.replace_with(linesep) return [lyricbox.text.strip() for lyricbox in lyricboxes]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open_file(name, mode=None, driver=None, libver=None, userblock_size=None, **kwargs): """Open an ARF file, creating as necessary. Use this instead of h5py.File to ensure that root-level attributes and group creation property lists are set correctly. """
import sys import os from h5py import h5p from h5py._hl import files try: # If the byte string doesn't match the default # encoding, just pass it on as-is. Note Unicode # objects can always be encoded. name = name.encode(sys.getfilesystemencoding()) except (UnicodeError, LookupError): pass exists = os.path.exists(name) try: fcpl = h5p.create(h5p.FILE_CREATE) fcpl.set_link_creation_order( h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) except AttributeError: # older version of h5py fp = files.File(name, mode=mode, driver=driver, libver=libver, **kwargs) else: fapl = files.make_fapl(driver, libver, **kwargs) fp = files.File(files.make_fid(name, mode, userblock_size, fapl, fcpl)) if not exists and fp.mode == 'r+': set_attributes(fp, arf_library='python', arf_library_version=__version__, arf_version=spec_version) return fp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_entry(group, name, timestamp, **attributes): """Create a new ARF entry under group, setting required attributes. An entry is an abstract collection of data which all refer to the same time frame. Data can include physiological recordings, sound recordings, and derived data such as spike times and labels. See add_data() for information on how data are stored. name -- the name of the new entry. any valid python string. timestamp -- timestamp of entry (datetime object, or seconds since January 1, 1970). Can be an integer, a float, or a tuple of integers (seconds, microseconds) Additional keyword arguments are set as attributes on created entry. Returns: newly created entry object """
# create group using low-level interface to store creation order from h5py import h5p, h5g, _hl try: gcpl = h5p.create(h5p.GROUP_CREATE) gcpl.set_link_creation_order( h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) except AttributeError: grp = group.create_group(name) else: name, lcpl = group._e(name, lcpl=True) grp = _hl.group.Group(h5g.create(group.id, name, lcpl=lcpl, gcpl=gcpl)) set_uuid(grp, attributes.pop("uuid", None)) set_attributes(grp, timestamp=convert_timestamp(timestamp), **attributes) return grp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_dataset(group, name, data, units='', datatype=DataTypes.UNDEFINED, chunks=True, maxshape=None, compression=None, **attributes): """Create an ARF dataset under group, setting required attributes Required arguments: name -- the name of dataset in which to store the data data -- the data to store Data can be of the following types: * sampled data: an N-D numerical array of measurements * "simple" event data: a 1-D array of times * "complex" event data: a 1-D array of records, with field 'start' required Optional arguments: datatype -- a code defining the nature of the data in the channel units -- channel units (optional for sampled data, otherwise required) sampling_rate -- required for sampled data and event data with units=='samples' Arguments passed to h5py: maxshape -- make the node resizable up to this shape. Use None for axes that need to be unlimited. chunks -- specify the chunk size. The optimal chunk size depends on the intended use of the data. For single-channel sampled data the auto-chunking (True) is probably best. compression -- compression strategy. Can be 'gzip', 'szip', 'lzf' or an integer in range(10) specifying gzip(N). Only gzip is really portable. Additional arguments are set as attributes on the created dataset Returns the created dataset """
from numpy import asarray srate = attributes.get('sampling_rate', None) # check data validity before doing anything if not hasattr(data, 'dtype'): data = asarray(data) if data.dtype.kind in ('S', 'O', 'U'): raise ValueError( "data must be in array with numeric or compound type") if data.dtype.kind == 'V': if 'start' not in data.dtype.names: raise ValueError("complex event data requires 'start' field") if not isinstance(units, (list, tuple)): raise ValueError("complex event data requires sequence of units") if not len(units) == len(data.dtype.names): raise ValueError("number of units doesn't match number of fields") if units == '': if srate is None or not srate > 0: raise ValueError( "unitless data assumed time series and requires sampling_rate attribute") elif units == 'samples': if srate is None or not srate > 0: raise ValueError( "data with units of 'samples' requires sampling_rate attribute") # NB: can't really catch case where sampled data has units but doesn't # have sampling_rate attribute dset = group.create_dataset( name, data=data, maxshape=maxshape, chunks=chunks, compression=compression) set_attributes(dset, units=units, datatype=datatype, **attributes) return dset
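A hedged end-to-end sketch of the API above, under the assumption that the file name, entry name, and sampling rate are all illustrative: unitless sampled data must carry a sampling_rate, so the call below supplies one.

import numpy as np

fp = open_file("example.arf", mode="a")             # hypothetical file name
entry = create_entry(fp, "entry_001", timestamp=0)  # epoch timestamp
dset = create_dataset(entry, "pcm",
                      data=np.zeros(1000, dtype="int16"),
                      units="", sampling_rate=20000,
                      datatype=DataTypes.UNDEFINED)
fp.close()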
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_file_version(file): """Check the ARF version attribute of file for compatibility. Raises DeprecationWarning for backwards-incompatible files, FutureWarning for (potentially) forwards-incompatible files, and UserWarning for files that may not have been created by an ARF library. Returns the version for the file """
from distutils.version import StrictVersion as Version try: ver = file.attrs.get('arf_version', None) if ver is None: ver = file.attrs['arf_library_version'] except KeyError: raise UserWarning( "Unable to determine ARF version for {0.filename};" "created by another program?".format(file)) try: # if the attribute is stored as a string, it's ascii-encoded ver = ver.decode("ascii") except (LookupError, AttributeError): pass # should be backwards compatible after 1.1 file_version = Version(ver) if file_version < Version('1.1'): raise DeprecationWarning( "ARF library {} may have trouble reading file " "version {} (< 1.1)".format(version, file_version)) elif file_version >= Version('3.0'): raise FutureWarning( "ARF library {} may be incompatible with file " "version {} (>= 3.0)".format(version, file_version)) return file_version
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_attributes(node, overwrite=True, **attributes): """Set multiple attributes on node. If overwrite is False, and the attribute already exists, does nothing. If the value for a key is None, the attribute is deleted. """
aset = node.attrs for k, v in attributes.items(): if not overwrite and k in aset: pass elif v is None: if k in aset: del aset[k] else: aset[k] = v
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def keys_by_creation(group): """Returns a sequence of links in group in order of creation. Raises an error if the group was not set to track creation order. """
from h5py import h5 out = [] try: group._id.links.iterate( out.append, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC) except (AttributeError, RuntimeError): # pre 2.2 shim def f(name): if name.find(b'/', 1) == -1: out.append(name) group._id.links.visit( f, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC) return map(group._d, out)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_timestamp(obj): """Make an ARF timestamp from an object. Argument can be a datetime.datetime object, a time.struct_time, an integer, a float, or a tuple of integers. The returned value is a numpy array with the integer number of seconds since the Epoch and any additional microseconds. Note that because floating point values are approximate, the conversion between float and integer tuple may not be reversible. """
import numbers
from datetime import datetime
from time import mktime, struct_time
from numpy import zeros
out = zeros(2, dtype='int64')
if isinstance(obj, datetime):
    out[0] = mktime(obj.timetuple())
    out[1] = obj.microsecond
elif isinstance(obj, struct_time):
    out[0] = mktime(obj)
elif isinstance(obj, numbers.Integral):
    out[0] = obj
elif isinstance(obj, numbers.Real):
    out[0] = obj
    out[1] = (obj - out[0]) * 1e6
else:
    try:
        out[:2] = obj[:2]
    except Exception:
        raise TypeError("unable to convert %s to timestamp" % obj)
return out
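To make the float-to-(seconds, microseconds) split concrete, a few calls and their expected arrays (the float case is approximate, as the docstring warns):

from datetime import datetime

print(convert_timestamp(1234567890))         # [1234567890, 0]
print(convert_timestamp(1234567890.5))       # [1234567890, 500000]
print(convert_timestamp((1234567890, 250)))  # [1234567890, 250]
# datetime objects contribute their own microsecond field
print(convert_timestamp(datetime(2020, 1, 1, microsecond=250)))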
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_uuid(obj, uuid=None): """Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype """
from uuid import uuid4, UUID if uuid is None: uuid = uuid4() elif isinstance(uuid, bytes): if len(uuid) == 16: uuid = UUID(bytes=uuid) else: uuid = UUID(hex=uuid) if "uuid" in obj.attrs: del obj.attrs["uuid"] obj.attrs.create("uuid", str(uuid).encode('ascii'), dtype="|S36")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_uuid(obj): """Return the uuid for obj, or null uuid if none is set"""
# TODO: deprecate null uuid ret val from uuid import UUID try: uuid = obj.attrs['uuid'] except KeyError: return UUID(int=0) # convert to unicode for python 3 try: uuid = uuid.decode('ascii') except (LookupError, AttributeError): pass return UUID(uuid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_children(obj, type=None): """Return the number of children of obj, optionally restricting by class"""
if type is None: return len(obj) else: # there doesn't appear to be any hdf5 function for getting this # information without inspecting each child, which makes this somewhat # slow return sum(1 for x in obj if obj.get(x, getclass=True) is type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _todict(cls): """ generate a dict keyed by value """
return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith('_'))
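Presumably this inverts a class of named constants into a code-to-name lookup; a quick illustration with a hypothetical constants class:

class Codes:
    # Hypothetical stand-in for a DataTypes-style constants class.
    UNDEFINED = 0
    SAMPLED = 1
    EVENT = 1000

print(_todict(Codes))  # {1000: 'EVENT', 1: 'SAMPLED', 0: 'UNDEFINED'}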
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compare_singularity_images(image_paths1, image_paths2=None): '''compare_singularity_images is a wrapper for compare_containers to compare singularity containers. If image_paths2 is not defined, pairwise comparison is done with image_paths1 ''' repeat = False if image_paths2 is None: image_paths2 = image_paths1 repeat = True if not isinstance(image_paths1,list): image_paths1 = [image_paths1] if not isinstance(image_paths2,list): image_paths2 = [image_paths2] dfs = pandas.DataFrame(index=image_paths1,columns=image_paths2) comparisons_done = [] for image1 in image_paths1: fileobj1,tar1 = get_image_tar(image1) members1 = [x.name for x in tar1] for image2 in image_paths2: comparison_id = [image1,image2] comparison_id.sort() comparison_id = "".join(comparison_id) if comparison_id not in comparisons_done: if image1 == image2: sim = 1.0 else: fileobj2,tar2 = get_image_tar(image2) members2 = [x.name for x in tar2] c = compare_lists(members1, members2) sim = information_coefficient(c['total1'],c['total2'],c['intersect']) delete_image_tar(fileobj2, tar2) dfs.loc[image1,image2] = sim if repeat: dfs.loc[image2,image1] = sim comparisons_done.append(comparison_id) delete_image_tar(fileobj1, tar1) return dfs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_image_hashes(image_path, version=None, levels=None): '''get_image_hashes returns the hash for an image across all levels. This is the quickest, easiest way to define a container's reproducibility on each level. ''' if levels is None: levels = get_levels(version=version) hashes = dict() for level_name,level_filter in levels.items(): hashes[level_name] = get_image_hash(image_path, level_filter=level_filter) return hashes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_node(node, parent): '''add_node will add a node to its parent ''' newNode = dict(node_id=node.id, children=[]) parent["children"].append(newNode) if node.left: add_node(node.left, newNode) if node.right: add_node(node.right, newNode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def file_counts(container=None, patterns=None, image_package=None, file_list=None):
    '''file_counts will return the number of files that match one or more
       regular expressions. If no patterns are defined, a default of readme
       is used. All patterns and files are made case insensitive.

       Parameters
       ==========
       :param container: if provided, will use container as image.
       :param image_package: if provided, can be used instead of container
       :param patterns: one or more patterns (str or list) of files to search for.
       :param file_list: the list of files in the container; if not provided,
        will be generated with get_container_contents.

    '''
    if file_list is None:
        file_list = get_container_contents(container, split_delim='\n')['all']

    if patterns is None:
        patterns = 'readme'

    if not isinstance(patterns, list):
        patterns = [patterns]

    count = 0
    for pattern in patterns:
        count += len([x for x in file_list if re.search(pattern.lower(), x.lower())])

    bot.info("Total files matching patterns is %s" % count)
    return count
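Because file_list can be passed directly, the counting core can be exercised without a container; a standalone sketch with a made-up listing:

import re

file_list = ['/README.md', '/usr/share/doc/pkg/readme.txt', '/opt/app/main.py']
patterns = ['readme', r'\.py$']

count = 0
for pattern in patterns:
    # case-insensitive match of each pattern against each path
    count += len([x for x in file_list if re.search(pattern.lower(), x.lower())])
print(count)  # 3: two readme matches plus one .py match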
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def include_file(member,file_filter): '''include_file will look at a path and determine if it matches a regular expression from a level ''' member_path = member.name.replace('.','',1) if len(member_path) == 0: return False # Does the filter skip it explicitly? if "skip_files" in file_filter: if member_path in file_filter['skip_files']: return False # Include explicitly? if "include_files" in file_filter: if member_path in file_filter['include_files']: return True # Regular expression? if "regexp" in file_filter: if re.search(file_filter["regexp"],member_path): return True return False
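A quick check of the precedence above (skip first, then include, then regexp), using tarfile.TarInfo as a stand-in member; the filter values are illustrative only:

import tarfile

member = tarfile.TarInfo(name='./etc/passwd')
file_filter = {'skip_files': {'/etc/hosts'},
               'include_files': {'/etc/passwd'},
               'regexp': '^/etc/'}

print(include_file(member, file_filter))  # True, via include_files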
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def assess_content(member, file_filter):
    '''Determine if the filter wants the file to be read for content.
       If so, we add the content to the hash rather than the file object.
    '''
    member_path = member.name.replace('.', '', 1)

    if len(member_path) == 0:
        return False

    # Does the filter skip it explicitly?
    if "skip_files" in file_filter:
        if member_path in file_filter['skip_files']:
            return False

    if "assess_content" in file_filter:
        if member_path in file_filter['assess_content']:
            return True

    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_level(level, version=None, include_files=None, skip_files=None):
    '''get_level returns a single level, with the option to customize which
       files are included and skipped.
    '''
    levels = get_levels(version=version)
    level_names = list(levels.keys())

    if level.upper() in level_names:
        level = levels[level.upper()]  # index with the normalized key
    else:
        bot.warning("%s is not a valid level. Options are %s" % (level.upper(),
                                                                 "\n".join(levels)))
        return None

    # Add additional files to skip or include, if defined
    if skip_files is not None:
        level = modify_level(level, 'skip_files', skip_files)
    if include_files is not None:
        level = modify_level(level, 'include_files', include_files)

    level = make_level_set(level)
    return level
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def make_levels_set(levels):
    '''make_levels_set will convert all lists of items in levels to sets
       to speed up membership operations'''
    for level_key, level_filters in levels.items():
        levels[level_key] = make_level_set(level_filters)
    return levels
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def make_level_set(level): '''make level set will convert one level into a set''' new_level = dict() for key,value in level.items(): if isinstance(value,list): new_level[key] = set(value) else: new_level[key] = value return new_level
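Non-list values pass through untouched while lists become sets (so duplicates collapse); a one-line check with an illustrative level:

level = {'skip_files': ['/etc/hosts', '/etc/hosts'], 'regexp': '^/etc/'}
print(make_level_set(level))
# {'skip_files': {'/etc/hosts'}, 'regexp': '^/etc/'}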
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def extract_guts(image_path, tar, file_filter=None, tag_root=True, include_sizes=True): '''extract the file guts from an in memory tarfile. The file is not closed. This should not be done for large images. ''' if file_filter is None: file_filter = get_level('IDENTICAL') results = dict() digest = dict() allfiles = [] if tag_root: roots = dict() if include_sizes: sizes = dict() for member in tar: member_name = member.name.replace('.','',1) allfiles.append(member_name) included = False if member.isdir() or member.issym(): continue elif assess_content(member,file_filter): digest[member_name] = extract_content(image_path, member.name, return_hash=True) included = True elif include_file(member,file_filter): hasher = hashlib.md5() buf = member.tobuf() hasher.update(buf) digest[member_name] = hasher.hexdigest() included = True if included: if include_sizes: sizes[member_name] = member.size if tag_root: roots[member_name] = is_root_owned(member) results['all'] = allfiles results['hashes'] = digest if include_sizes: results['sizes'] = sizes if tag_root: results['root_owned'] = roots return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_memory_tar(image_path):
    '''get an in-memory tar of an image. Use carefully; this is not as
       reliable as get_image_tar.
    '''
    byte_array = Client.image.export(image_path)
    file_object = io.BytesIO(byte_array)
    tar = tarfile.open(mode="r|*", fileobj=file_object)
    return (file_object, tar)