Code: string, lengths 103 to 85.9k
Summary: sequence, lengths 0 to 94
Please provide a description of the function:
def d(nominal_temperature):
    # From CIE 15:2004. Colorimetry, 3rd edition, 2004 (page 69, note 5):
    #
    # The method required to calculate the values for the relative spectral
    # power distributions of illuminants D50, D55, D65, and D75, in Table T.1
    # is as follows:
    #   1. Multiply the nominal correlated colour temperature (5000 K, 5500 K,
    #      6500 K or 7500 K) by 1,4388/1,4380.
    #   2. Calculate XD and YD using the equations given in the text.
    #   3. Calculate M1 and M2 using the equations given in the text.
    #   4. Round M1 and M2 to three decimal places.
    #   5. Calculate S(lambda) every 10 nm by
    #        S(lambda) = S0(lambda) + M1 S1(lambda) + M2 S2(lambda)
    #      using values of S0(lambda), S1(lambda) and S2(lambda) from Table T.2.
    #   6. Interpolate the 10 nm values of S(lambda) linearly to obtain values
    #      at intermediate wavelengths.
    tcp = 1.4388e-2 / 1.4380e-2 * nominal_temperature
    if 4000 <= tcp <= 7000:
        xd = ((-4.6070e9 / tcp + 2.9678e6) / tcp + 0.09911e3) / tcp + 0.244063
    else:
        assert 7000 < tcp <= 25000
        xd = ((-2.0064e9 / tcp + 1.9018e6) / tcp + 0.24748e3) / tcp + 0.237040
    yd = (-3.000 * xd + 2.870) * xd - 0.275
    m1 = (-1.3515 - 1.7703 * xd + 5.9114 * yd) / (0.0241 + 0.2562 * xd - 0.7341 * yd)
    m2 = (+0.0300 - 31.4424 * xd + 30.0717 * yd) / (0.0241 + 0.2562 * xd - 0.7341 * yd)
    m1 = numpy.around(m1, decimals=3)
    m2 = numpy.around(m2, decimals=3)

    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(dir_path, "data/illuminants/d.yaml")) as f:
        data = yaml.safe_load(f)

    data = numpy.array(data).T
    lmbda = data[0]
    s = data[1:]
    return lmbda, s[0] + m1 * s[1] + m2 * s[2]
[ "CIE D-series illuminants.\n\n The technical report `Colorimetry, 3rd edition, 2004` gives the data for\n D50, D55, and D65 explicitly, but also explains how it's computed for S0,\n S1, S2. Values are given at 5nm resolution in the document, but really\n every other value is just interpolated. Hence, only provide 10 nm data\n here.\n " ]
Please provide a description of the function:
def e():
    lmbda = 1.0e-9 * numpy.arange(300, 831)
    data = numpy.full(lmbda.shape, 100.0)
    return lmbda, data
[ "This is a hypothetical reference radiator. All wavelengths in CIE\n illuminant E are weighted equally with a relative spectral power of 100.0.\n " ]
Please provide a description of the function:
def to_xyz100(self, data, description):
    rgb_c = compute_to(data, description, self)

    # Step 6: Calculate R, G and B
    # rgb = (rgb_c.T / self.D_RGB).T

    # Step 7: Calculate X, Y and Z
    # xyz = self.solve_M16(rgb)
    return dot(self.invM_, rgb_c)
[ "Input: J or Q; C, M or s; H or h\n " ]
Please provide a description of the function:
def dot(a, b):
    b = numpy.asarray(b)
    return numpy.dot(a, b.reshape(b.shape[0], -1)).reshape(a.shape[:-1] + b.shape[1:])
[ "Take arrays `a` and `b` and form the dot product between the last axis\n of `a` and the first of `b`.\n " ]
Please provide a description of the function:
def solve(A, x):
    # https://stackoverflow.com/a/48387507/353337
    x = numpy.asarray(x)
    return numpy.linalg.solve(A, x.reshape(x.shape[0], -1)).reshape(x.shape)
[ "Solves a linear equation system with a matrix of shape (n, n) and an\n array of shape (n, ...). The output has the same shape as the second\n argument.\n " ]
Please provide a description of the function:
def to_xyz100(self, data, description):
    # Steps 1-5
    rgb_ = compute_to(data, description, self)

    # Step 6: Calculate RC, GC and BC
    # rgb_c = dot(self.M_cat02, solve(self.M_hpe, rgb_))
    #
    # Step 7: Calculate R, G and B
    # rgb = (rgb_c.T / self.D_RGB).T
    #
    # Step 8: Calculate X, Y and Z
    # xyz = solve(self.M_cat02, rgb)
    return dot(self.invM_, rgb_)
[ "Input: J or Q; C, M or s; H or h\n " ]
Please provide a description of the function:def main(): parser = getparser() args = parser.parse_args() fn = args.fn sitename = args.sitename #User-specified output extent #Note: not checked, untested if args.extent is not None: extent = (args.extent).split() else: extent = (geolib.site_dict[sitename]).extent if args.refdem_fn is not None: refdem_fn = args.refdem_fn else: refdem_fn = (geolib.site_dict[sitename]).refdem_fn #Max elevation difference between shot and sampled DEM max_z_DEM_diff = 200 #Max elevation std for sampled DEM values in padded window around shot max_DEMhiresArElv_std = 50.0 f = h5py.File(fn) t = f.get('Data_40HZ/Time/d_UTCTime_40')[:] #pyt0 = datetime(1, 1, 1, 0, 0) #utct0 = datetime(1970, 1, 1, 0, 0) #t0 = datetime(2000, 1, 1, 12, 0, 0) #offset_s = (t0 - utct0).total_seconds() offset_s = 946728000.0 t += offset_s dt = timelib.np_utc2dt(t) dt_o = timelib.dt2o(dt) #dts = timelib.np_print_dt(dt) #dt_decyear = timelib.np_dt2decyear(dt) dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=long) lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:], 1.7976931348623157e+308) lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:], 1.7976931348623157e+308) lon = geolib.lon360to180(lon) z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:], 1.7976931348623157e+308) print('Input: %i' % z.count()) #Now spatial filter - should do this up front x = lon y = lat xmin, xmax, ymin, ymax = extent #This is True if point is within extent valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax)) #Prepare output array #out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T #Create a mask to ensure all four values are valid for each point mask = ~(np.any(np.ma.getmaskarray(out), axis=1)) mask *= valid_idx out = out[mask] valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1)) #Lon and lat indices xcol = 3 ycol = 2 zcol = 4 if out.shape[0] == 0: sys.exit("No points within specified extent\n") else: print("Spatial filter: %i" % out.shape[0]) #out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f'] #out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84'] out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f'] out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84'] #Saturation Correction Flag #These are 0 to 5, not_saturated inconsequential applicable not_computed not_applicable sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask] #valid_idx *= (sat_corr_flg < 2) #Correction to elevation for saturated waveforms #Notes suggest this might not be desirable over land satElevCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask], 1.7976931348623157e+308) #z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3] out[:,zcol] += satElevCorr.filled(0.0) #Correction to elevation based on post flight analysis for biases determined for each campaign ElevBiasCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask], 1.7976931348623157e+308) out[:,zcol] += ElevBiasCorr.filled(0.0) #Surface elevation (T/P ellipsoid) minus surface elevation (WGS84 ellipsoid). 
#Approximately 0.7 m, so WGS is lower; need to subtract from d_elev deltaEllip = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask], 1.7976931348623157e+308) out[:,zcol] -= deltaEllip #These are 1 for valid, 0 for invalid valid_idx *= ~(np.ma.getmaskarray(out[:,zcol])) print("z corrections: %i" % valid_idx.nonzero()[0].size) if False: #Reflectivity, not corrected for atmospheric effects reflctUC = np.ma.masked_equal(f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask], 1.7976931348623157e+308) #This was minimum used for ice sheets min_reflctUC = 0.025 valid_idx *= (reflctUC > min_reflctUC).data print("reflctUC: %i" % valid_idx.nonzero()[0].size) if False: #The Standard deviation of the difference between the functional fit and the received echo \ #using alternate parameters. It is directly taken from GLA05 parameter d_wfFitSDev_1 LandVar = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask], 1.7976931348623157e+308) #This was max used for ice sheets max_LandVar = 0.04 valid_idx *= (LandVar < max_LandVar).data print("LandVar: %i" % valid_idx.nonzero()[0].size) if True: #Flag indicating whether the elevations on this record should be used. #0 = valid, 1 = not valid elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype('Bool') valid_idx *= ~elev_use_flg print("elev_use_flg: %i" % valid_idx.nonzero()[0].size) if False: #Cloud contamination; Indicates if Gain > flag value, indicating probable cloud contamination. elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype('Bool') valid_idx *= ~elv_cloud_flg print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size) if False: #Full resolution 1064 Quality Flag; 0 - 12 indicate Cloud detected FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask] valid_idx *= (FRir_qa_flg == 15).data print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size) if False: #This is elevation extracted from SRTM30 DEM_elv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask], 1.7976931348623157e+308) z_DEM_diff = np.abs(out[:,zcol] - DEM_elv) valid_idx *= (z_DEM_diff < max_z_DEM_diff).data print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size) #d_DEMhiresArElv is a 9 element array of high resolution DEM values. The array index corresponds to the position of the DEM value relative to the spot. (5) is the footprint center. 
DEMhiresArElv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask], 1.7976931348623157e+308) DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1) valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size) #Compute slope #Apply cumulative filter to output out = out[valid_idx] out_fn = os.path.splitext(fn)[0]+'_%s.csv' % sitename print("Writing out %i records to: %s\n" % (out.shape[0], out_fn)) out_fmt_str = ', '.join(out_fmt) out_hdr_str = ', '.join(out_hdr) np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str) iolib.writevrt(out_fn, x='lon', y='lat') #Extract our own DEM values - should be better than default GLAS reference DEM stats if True: print("Loading reference DEM: %s" % refdem_fn) dem_ds = gdal.Open(refdem_fn) print("Converting coords for DEM") dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs) print("Sampling") dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas') abs_dem_z_diff = np.abs(out[:,zcol] - dem_samp[:,0]) valid_idx *= ~(np.ma.getmaskarray(abs_dem_z_diff)) print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size) valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size) valid_idx *= (dem_samp[:,1] < max_DEMhiresArElv_std).data print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size) if valid_idx.nonzero()[0].size == 0: sys.exit("No valid points remain") out = np.ma.hstack([out, dem_samp]) out_fmt.extend(['%0.2f', '%0.2f']) out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad']) #Apply cumulative filter to output out = out[valid_idx] out_fn = os.path.splitext(out_fn)[0]+'_refdemfilt.csv' print("Writing out %i records to: %s\n" % (out.shape[0], out_fn)) out_fmt_str = ', '.join(out_fmt) out_hdr_str = ', '.join(out_hdr) np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str) iolib.writevrt(out_fn, x='lon', y='lat') #This will sample land-use/land-cover or percent bareground products #Can be used to isolate points over exposed rock #if args.rockfilter: if True: #This should automatically identify appropriate LULC source based on refdem extent lulc_source = dem_mask.get_lulc_source(dem_ds) #Looks like NED extends beyond NCLD, force use NLCD for conus #if sitename == 'conus': # lulc_source = 'nlcd' lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source) print("Converting coords for LULC") lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs) print("Sampling LULC: %s" % lulc_source) #Note: want to make sure we're not interpolating integer values for NLCD #Should be safe with pad=0, even with pad>0, should take median, not mean lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0) l = lulc_samp[:,0].data if lulc_source == 'nlcd': #This passes rock and ice pixels valid_idx = np.logical_or((l==31),(l==12)) elif lulc_source == 'bareground': #This preserves pixels with bareground percentation >85% minperc = 85 valid_idx = (l >= minperc) else: print("Unknown LULC source") print("LULC: %i" % valid_idx.nonzero()[0].size) if l.ndim == 1: l = l[:,np.newaxis] out = np.ma.hstack([out, l]) out_fmt.append('%i') out_hdr.append('lulc') #Apply cumulative filter to output out = out[valid_idx] out_fn = os.path.splitext(out_fn)[0]+'_lulcfilt.csv' print("Writing out %i records to: %s\n" % (out.shape[0], out_fn)) out_fmt_str = ', '.join(out_fmt) out_hdr_str = ', '.join(out_hdr) np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', 
header=out_hdr_str) iolib.writevrt(out_fn, x='lon', y='lat')
[ "\n ICESat-1 filters\n " ]
Please provide a description of the function:
def get_nlcd_fn():
    #This is original filename, which requires ~17 GB
    #nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
    #get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if not os.path.exists(nlcd_fn):
        cmd = ['get_nlcd.sh',]
        #subprocess.call(cmd)
        sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
    return nlcd_fn
[ "Calls external shell script `get_nlcd.sh` to fetch:\n\n 2011 Land Use Land Cover (nlcd) grids, 30 m\n \n http://www.mrlc.gov/nlcd11_leg.php\n " ]
Please provide a description of the function:
def get_bareground_fn():
    bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if not os.path.exists(bg_fn):
        cmd = ['get_bareground.sh',]
        sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
        #subprocess.call(cmd)
    return bg_fn
[ "Calls external shell script `get_bareground.sh` to fetch:\n\n ~2010 global bare ground, 30 m\n\n Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)\n\n The shell script will compress all downloaded tiles using lossless LZW compression.\n\n http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php\n " ]
Please provide a description of the function:
def get_glacier_poly():
    #rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
    #Update to rgi60, should have this returned from get_rgi.sh
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if not os.path.exists(rgi_fn):
        cmd = ['get_rgi.sh',]
        sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
        #subprocess.call(cmd)
    return rgi_fn
[ "Calls external shell script `get_rgi.sh` to fetch:\n\n Randolph Glacier Inventory (RGI) glacier outline shapefiles \n\n Full RGI database: rgi50.zip is 410 MB\n\n The shell script will unzip and merge regional shp into single global shp\n \n http://www.glims.org/RGI/\n " ]
Please provide a description of the function:
def get_icemask(ds, glac_shp_fn=None):
    print("Masking glaciers")
    if glac_shp_fn is None:
        glac_shp_fn = get_glacier_poly()
    if not os.path.exists(glac_shp_fn):
        print("Unable to locate glacier shp: %s" % glac_shp_fn)
    else:
        print("Found glacier shp: %s" % glac_shp_fn)
        #All of the proj, extent, handling should now occur in shp2array
        icemask = geolib.shp2array(glac_shp_fn, ds)
    return icemask
[ "Generate glacier polygon raster mask for input Dataset res/extent\n " ]
Please provide a description of the function:
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    print("Loading NLCD LULC")
    b = nlcd_ds.GetRasterBand(1)
    l = b.ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    #Original nlcd products have nan as ndv
    #12 - ice
    #31 - rock
    #11 - open water, includes rivers
    #52 - shrub, <5 m tall, >20%
    #42 - evergreen forest
    #Should use data dictionary here for general masking
    #Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
    if filter == 'rock':
        mask = (l==31)
    elif filter == 'rock+ice':
        mask = np.logical_or((l==31),(l==12))
    elif filter == 'rock+ice+water':
        mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
    elif filter == 'not_forest':
        mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
    elif filter == 'not_forest+not_water':
        mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
    else:
        print("Invalid mask type")
        mask = None
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, nlcd_ds)
    l = None
    return mask
[ "Generate raster mask for specified NLCD LULC filter\n " ]
Please provide a description of the function:
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    print("Loading bareground")
    b = bareground_ds.GetRasterBand(1)
    l = b.ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (l>bareground_thresh)
    #Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, bareground_ds)
    l = None
    return mask
[ "Generate raster mask for exposed bare ground from global bareground data\n " ]
Please provide a description of the function:
def get_snodas_ds(dem_dt, code=1036):
    import tarfile
    import gzip

    snodas_ds = None
    snodas_url_str = None
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    #Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010,1,1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")

    if snodas_url_str is not None:
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        tar = tarfile.open(snodas_tar_fn)
        #gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                #Should be able to do this without writing intermediate gz to disk
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        #Need to delete 'Created by module comment' line from Hdr, can contain too many characters
        bad_str = 'Created by module comment'
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        #Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
[ "Function to fetch and process SNODAS snow depth products for input datetime\n\n http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html\n\n Product codes:\n 1036 is snow depth\n 1034 is SWE\n\n filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr\n\n " ]
Please provide a description of the function:
def get_modis_tile_list(ds):
    from demcoreg import modis_grid
    modis_dict = {}
    for key in modis_grid.modis_dict:
        modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
    geom = geolib.ds_geom(ds)
    geom_dup = geolib.geom_dup(geom)
    ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
    geom_dup.Transform(ct)
    tile_list = []
    for key, val in list(modis_dict.items()):
        if geom_dup.Intersects(val):
            tile_list.append(key)
    return tile_list
[ "Helper function to identify MODIS tiles that intersect input geometry\n\n modis_gird.py contains dictionary of tile boundaries (tile name and WKT polygon ring from bbox)\n\n See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n " ]
Please provide a description of the function:def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7): #Could also use global MODIS 500 m snowcover grids, 8 day #http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html #These are HDF4, sinusoidal #Should be able to load up with warplib without issue import re import requests from bs4 import BeautifulSoup auth = iolib.get_auth() pad_days = timedelta(days=pad_days) dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1)) outdir = os.path.join(datadir, 'modscag') if not os.path.exists(outdir): os.makedirs(outdir) out_vrt_fn_list = [] for dt in dt_list: out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt')) #If we already have a vrt and it contains all of the necessary tiles if os.path.exists(out_vrt_fn): vrt_ds = gdal.Open(out_vrt_fn) if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]): out_vrt_fn_list.append(out_vrt_fn) continue #Otherwise, download missing tiles and rebuild #Try to use historic products modscag_fn_list = [] #Note: not all tiles are available for same date ranges in historic vs. real-time #Need to repeat search tile-by-tile for tile in tile_list: modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/' modscag_url_base = dt.strftime(modscag_url_str) print("Trying: %s" % modscag_url_base) r = requests.get(modscag_url_base, auth=auth) modscag_url_fn = [] if r.ok: parsed_html = BeautifulSoup(r.content, "html.parser") modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile)) if not modscag_url_fn: #Couldn't find historic, try to use real-time products modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/' modscag_url_base = dt.strftime(modscag_url_str) print("Trying: %s" % modscag_url_base) r = requests.get(modscag_url_base, auth=auth) if r.ok: parsed_html = BeautifulSoup(r.content, "html.parser") modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile)) if not modscag_url_fn: print("Unable to fetch MODSCAG for %s" % dt) else: #OK, we got #Now extract actual tif filenames to fetch from html parsed_html = BeautifulSoup(r.content, "html.parser") #Fetch all tiles modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile)) if modscag_url_fn: modscag_url_fn = modscag_url_fn[0] modscag_url = os.path.join(modscag_url_base, modscag_url_fn) print(modscag_url) modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1]) if not os.path.exists(modscag_fn): iolib.getfile2(modscag_url, auth=auth, outdir=outdir) modscag_fn_list.append(modscag_fn) #Mosaic tiles - currently a hack if modscag_fn_list: cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn] cmd.extend(modscag_fn_list) print(cmd) subprocess.call(cmd, shell=False) out_vrt_fn_list.append(out_vrt_fn) return out_vrt_fn_list
[ "Function to fetch and process MODSCAG fractional snow cover products for input datetime\n\n Products are tiled in MODIS sinusoidal projection\n\n example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif\n\n " ]
Please provide a description of the function:
def proc_modscag(fn_list, extent=None, t_srs=None):
    #Use cubic spline here for improved upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
    #Create stack here - no need for most of mastack machinery, just make 3D array
    #Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)

    stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)

    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])

    ds = gdal.Open(out_fn)
    return ds
[ "Process the MODSCAG products for full date range, create composites and reproject\n " ]
Please provide a description of the function:
def apply_xy_shift(ds, dx, dy, createcopy=True):
    print("X shift: ", dx)
    print("Y shift: ", dy)

    #Update geotransform
    gt_orig = ds.GetGeoTransform()
    gt_shift = np.copy(gt_orig)
    gt_shift[0] += dx
    gt_shift[3] += dy

    print("Original geotransform:", gt_orig)
    print("Updated geotransform:", gt_shift)

    #Update ds Geotransform
    if createcopy:
        ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
    else:
        #Update in place, assume ds is opened as GA_Update
        ds_align = ds
    ds_align.SetGeoTransform(gt_shift)
    return ds_align
[ "\n Apply horizontal shift to GDAL dataset GeoTransform\n \n Returns:\n GDAL Dataset copy with updated GeoTransform\n " ]
Please provide a description of the function:
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
    #This defines the search window size
    #Use half-pixel stride?
    #Note: stride is not properly implemented
    #stride = 1
    #ref = dem1[::stride,::stride]
    #kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
    kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
    #Want to pad evenly on both sides, so add +1 here
    m = np.zeros((pad[0]*2+1, pad[1]*2+1))

    #Find integer pixel offset
    i = j = 0
    for i in range(m.shape[0]):
        print(i)
        for j in range(m.shape[1]):
            print(j)
            ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
            diff = ref - kernel
            #Remove outliers beyond IQR
            diff_iqr = malib.calcperc(diff, (25,75))
            diff = np.ma.masked_outside(diff, *diff_iqr)
            #Masked areas will decrease sum! Normalize by count of valid pixels
            m[i,j] = np.ma.abs(diff).sum()/diff.count()

    #Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
    m = -m
    int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
    int_offset = int_argmax - pad
    sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
    sp_offset = sp_argmax - pad

    if plot:
        plt.figure()
        plt.title('Sum of Absolute Differences')
        plt.imshow(m)
        plt.scatter(*sp_argmax[::-1])
        #plt.show()

    return m, int_offset, sp_offset
[ "Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method\n ", " \n diff_med = np.ma.median(diff)\n diff_mad = malib.mad(diff)\n diff_madr = (diff_med - mad, diff_med + mad)\n diff = np.ma.masked_outside(diff, diff_madr) \n " ]
Please provide a description of the function:
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
    #Apply edge detection filter up front - improves results when input DEMs are same resolution
    if prefilter:
        print("Applying LoG edge-detection filter to DEMs")
        sigma = 1
        import scipy.ndimage
        #Note, ndimage alone propagates Nans and greatly reduces valid data area
        #Use the malib.nanfill wrapper to avoid this
        dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
        dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)

    import scipy.signal
    #Compute max offset given dem spatial resolution
    #Should implement arbitrary x and y search space
    #xsearch = (20, 41)
    #ysearch = (-10, 1)
    stride = 1
    ref = dem1[::stride,::stride]
    kernel = dem2[pad[0]:-pad[1]:stride, pad[0]:-pad[1]:stride]
    #kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]

    #Normalize
    ref = (ref - ref.mean()) / ref.std()
    kernel = (kernel - kernel.mean()) / kernel.std()

    #Consider using astropy.convolve here instead of scipy.correlate?
    print("Adding random noise to masked regions")
    #Generate random noise to fill gaps before correlation in frequency domain
    #Normal distribution N(mean, std^2)
    #ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
    #kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
    #This provides noise in proper range, but noise propagates to m, peak is in different locations!
    #ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
    #kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
    #This provides a proper normal distribution with mean=0 and std=1
    ref_noise = ref.mask * (np.random.randn(*ref.shape))
    kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
    #Add the noise
    ref = ref.filled(0) + ref_noise
    kernel = kernel.filled(0) + kernel_noise

    print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
    m = scipy.signal.correlate2d(ref, kernel, 'valid')
    #This has memory issues, but ndimage filters can handle nan
    #m = scipy.ndimage.filters.correlate(ref, kernel)

    print("Computing sub-pixel peak")
    int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
    int_offset = int_argmax*stride - pad
    #int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
    print(m.argmax())
    print(m.shape)
    print(int_argmax)
    print(int_offset)

    #Find sub-pixel peak
    sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
    #May need to split this into integer and decimal components, multiply stride*int and add decimal
    #sp_offset = int_offset + (sp_argmax - int_argmax)
    sp_offset = sp_argmax - pad
    #sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
    print(sp_argmax)
    print(sp_offset)

    if plot:
        fig, ax = plt.subplots()
        ax.set_title('NCC offset, parabolic SPR')
        ax.imshow(m)
        #plt.scatter(*int_argmax[::-1])
        ax.scatter(*sp_argmax[::-1])
    else:
        fig = None

    return m, int_offset, sp_offset, fig
[ "Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method\n " ]
Please provide a description of the function:def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True): import scipy.optimize as optimization if dh.count() < min_count: sys.exit("Not enough dh samples") if slope.count() < min_count: sys.exit("Not enough slope/aspect samples") #mean_dh = dh.mean() #mean_slope = slope.mean() #c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope))) med_dh = malib.fast_median(dh) med_slope = malib.fast_median(slope) c_seed = (med_dh/np.tan(np.deg2rad(med_slope))) x0 = np.array([0.0, 0.0, c_seed]) print("Computing common mask") common_mask = ~(malib.common_mask([dh, aspect, slope])) #Prepare x and y data xdata = aspect[common_mask].data ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data print("Initial sample count:") print(ydata.size) if remove_outliers: print("Removing outliers") #print("Absolute dz filter: %0.2f" % max_dz) #diff = np.ma.masked_greater(diff, max_dz) #print(diff.count()) #Outlier dz filter f = 3 sigma, u = (ydata.std(), ydata.mean()) #sigma, u = malib.mad(ydata, return_med=True) rmin = u - f*sigma rmax = u + f*sigma print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax)) idx = (ydata >= rmin) & (ydata <= rmax) xdata = xdata[idx] ydata = ydata[idx] print(ydata.size) #Generate synthetic data to test curve_fit #xdata = np.arange(0,360,0.01) #ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata)) #Limit sample size #n = 10000 #idx = random.sample(range(xdata.size), n) #xdata = xdata[idx] #ydata = ydata[idx] #Compute robust statistics for 1-degree bins nbins = 360 bin_range = (0., 360.) bin_width = 1.0 bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range) bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range) #Needed to estimate sigma for weighted lsq #bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range) #Started implementing this for more generic binning, needs testing #bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \ # xlim=bin_range, nbins=(nbins, nbins), stat='count') #Remove any bins with only a few points min_bin_sample_count = 9 idx = (bin_count.filled(0) >= min_bin_sample_count) bin_count = bin_count[idx].data bin_med = bin_med[idx].data #bin_mad = bin_mad[idx].data bin_centers = bin_centers[idx] fit = None fit_fig = None #Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit #Need at least 3 valid bins to fit 3 parameters in nuth_func #min_bin_count = 3 min_bin_count = 90 #Not going to help if we have a step function between two plateaus, but better than nothing #Calculate bin aspect spread bin_ptp = np.cos(np.radians(bin_centers)).ptp() min_bin_ptp = 1.0 #Should iterate here, if not enough bins, increase bin width if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp: print("Computing fit") #Unweighted fit fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0] #Weight by observed spread in each bin #sigma = bin_mad #fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0] #Weight by bin count #sigma = bin_count.max()/bin_count #fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0] print(fit) if plot: print("Generating Nuth and Kaab plot") bin_idx = np.digitize(xdata, bin_edges) output = [] for i in np.arange(1, len(bin_edges)): 
output.append(ydata[bin_idx==i]) #flierprops={'marker':'.'} lw = 0.25 whiskerprops={'linewidth':lw} capprops={'linewidth':lw} boxprops={'facecolor':'k', 'linewidth':0} medianprops={'marker':'o', 'ms':1, 'color':'r'} fit_fig, ax = plt.subplots(figsize=(6,6)) #widths = (bin_width/2.0) widths = 2.5*(bin_count/bin_count.max()) #widths = bin_count/np.percentile(bin_count, 50) #Stride s=3 #This is inefficient, but we have list of arrays with different length, need to filter #Reduntant with earlier filter, should refactor bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \ patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \ medianprops=medianprops) bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360] ax.set_xticks(bin_ticks) ax.set_xticklabels(bin_ticks) #Plot the fit f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2]) nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit) ax.plot(bin_centers, f_a, 'b', label=nuth_func_str) ax.set_xlabel('Aspect (deg)') ax.set_ylabel('dh/tan(slope) (m)') ax.axhline(color='gray', linewidth=0.5) ax.set_xlim(*bin_range) ylim = ax.get_ylim() abs_ylim = np.max(np.abs(ylim)) #abs_ylim = np.max(np.abs([ydata.min(), ydata.max()])) #pad = 0.2 * abs_ylim pad = 0 ylim = (-abs_ylim - pad, abs_ylim + pad) minylim = (-10,10) if ylim[0] > minylim[0]: ylim = minylim ax.set_ylim(*ylim) ax.legend(prop={'size':8}) return fit, fit_fig
[ "Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method\n ", "\n #Mask bins in grid directions, can potentially contain biased stats\n #Especially true for SGM algorithm\n #badbins = [0, 90, 180, 270, 360]\n badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]\n bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)\n bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)\n ", "\n #Can pull out medians from boxplot\n #We are computing multiple times, inefficient\n bp_bin_med = []\n for medline in bp['medians']:\n bp_bin_med.append(medline.get_ydata()[0])\n " ]
Please provide a description of the function:
def find_first_peak(corr):
    ind = corr.argmax()
    s = corr.shape[1]
    i = ind // s
    j = ind % s
    return i, j, corr.max()
[ "\n Find row and column indices of the first correlation peak.\n \n Parameters\n ----------\n corr : np.ndarray\n the correlation map\n \n Returns\n -------\n i : int\n the row index of the correlation peak\n \n j : int\n the column index of the correlation peak \n \n corr_max1 : int\n the value of the correlation peak\n \n Original code from openPIV pyprocess\n\n " ]
Please provide a description of the function:
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
    # initialization
    default_peak_position = (corr.shape[0]/2, corr.shape[1]/2)

    # the peak locations
    peak1_i, peak1_j, dummy = find_first_peak(corr)

    try:
        # the peak and its neighbours: left, right, down, up
        c = corr[peak1_i, peak1_j]
        cl = corr[peak1_i-1, peak1_j]
        cr = corr[peak1_i+1, peak1_j]
        cd = corr[peak1_i, peak1_j-1]
        cu = corr[peak1_i, peak1_j+1]

        # gaussian fit
        if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
            subpixel_method = 'centroid'

        try:
            if subpixel_method == 'centroid':
                subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
                                      ((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
            elif subpixel_method == 'gaussian':
                subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
                                      peak1_j + ((np.log(cd)-np.log(cu))/(2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
            elif subpixel_method == 'parabolic':
                subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
                                      peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
        except:
            subp_peak_position = default_peak_position

    except IndexError:
        subp_peak_position = default_peak_position

    return subp_peak_position[0], subp_peak_position[1]
[ "\n Find subpixel approximation of the correlation peak.\n \n This function returns a subpixels approximation of the correlation\n peak by using one of the several methods available. If requested, \n the function also returns the signal to noise ratio level evaluated \n from the correlation map.\n \n Parameters\n ----------\n corr : np.ndarray\n the correlation map.\n \n subpixel_method : string\n one of the following methods to estimate subpixel location of the peak: \n 'centroid' [replaces default if correlation map is negative], \n 'gaussian' [default if correlation map is positive], \n 'parabolic'.\n \n Returns\n -------\n subp_peak_position : two elements tuple\n the fractional row and column indices for the sub-pixel\n approximation of the correlation peak.\n\n Original code from openPIV pyprocess\n\n " ]
Please provide a description of the function:def main(): #filenames = !ls *align/*reference-DEM.tif #run ~/src/demtools/error_analysis.py $filenames.s if len(sys.argv) < 1: sys.exit('No input files provided') fn_list = sys.argv[1:] n_samp = len(fn_list) error_dict_list = [] for fn in fn_list: ed = parse_pc_align_log(fn) if 'Translation vector (North-East-Down, meters)' in ed.keys(): error_dict_list.append(ed) import matplotlib.dates as mdates #This is used for interactive display of x-value in plot window date_str = '%Y-%m-%d %H:%M' date_fmt = mdates.DateFormatter(date_str) #ax.fmt_xdata = mdates.DateFormatter(date_fmt) months = mdates.MonthLocator() months_int = mdates.MonthLocator(interval=6) # every n months years = mdates.YearLocator() # every year yearsFmt = mdates.DateFormatter('%Y') #ax.xaxis.set_major_formatter(yearsFmt) #ax.xaxis.set_major_locator(months_int3) print print "n:", len(error_dict_list) #NOTE: changed default to N-E-D on 9/18/15 #Can have significant differences for local proj vs. polar stereographic proj #Should regenerate all previous figures #Local translation on ellipsoid #This appears to be local stereographic projection on ellipsoid key = 'Translation vector (North-East-Down, meters)' val = np.array([e[key] for e in error_dict_list]) #Reformat (n, e, +d) for (x, y, +z) coord sys val[:,[0,1]] = val[:,[1,0]] val[:,2] *= -1 ce90 = geolib.CE90(val[:,0], val[:,1]) le90 = geolib.LE90(val[:,2]) print print key print "CE90:", ce90 print "LE90:", le90 print print 'Centroid (mean) of offsets (local ned meters): ', np.mean(val, axis=0) print 'Centroid (median) of offsets (local ned meters): ', np.median(val, axis=0) #Remove vertical bias remove_vertbias = False if remove_vertbias: print "Removing vertical bias: %0.2f" % np.mean(val, axis=0)[2] val[:,2] -= np.mean(val, axis=0)[2] remove_outliers = False #Flag outliers x_mag = val[:,0] y_mag = val[:,1] h_mag = np.sqrt(val[:,0]**2 + val[:,1]**2) v_mag = val[:,2] mag = np.sqrt(val[:,0]**2 + val[:,1]**2 + val[:,2]**2) abs_thresh = 10.0 p = 98.0 p_thresh = np.percentile(h_mag, p) #print "Outliers with horiz error >= %0.2f (%0.1f%%)" % (p_thresh, p) print "Outliers:" #idx = (h_mag >= p_thresh).nonzero()[0] idx = (h_mag >= ce90).nonzero()[0] idx = np.unique(np.hstack([idx, ((np.abs(v_mag) >= le90).nonzero()[0])])) #Print all #idx = np.arange(h_mag.size) #idx_sort = np.argsort(mag[idx]) #idx = idx[idx_sort] print 'name, m, h, v, x, y, z' for i in idx: print error_dict_list[i]['File'], mag[i], h_mag[i], v_mag[i], val[i,0:3] #Delete from list if remove_outliers: print "Removing from calculation" del error_dict_list[i] if remove_vertbias or remove_outliers: print print "Updated values" print key print "CE90:", geolib.CE90(val[:,0], val[:,1]) print "LE90:", geolib.LE90(val[:,2]) print print 'Centroid (mean) of offsets (local ned meters): ', np.mean(val, axis=0) print 'Centroid (median) of offsets (local ned meters): ', np.median(val, axis=0) #Extract dates date_vec = np.array([e['Date'] for e in error_dict_list]) x = date_vec make_plot3d(val[:,0], val[:,1], val[:,2], title=key) #Note: there is a bug in pdf that displayes surface lines #fig_fn = 'icp_translation_vec_proj_meters.png' fig_fn = 'icp_translation_vec_local_meters.png' #plt.savefig(fig_fn, dpi=600, bbox_inches='tight') fig, ax = plt.subplots(1) key = 'Translation vector (lat,lon,z)' plt.title('ICP translation vector (lat,lon,z): Z component') val = np.array([e[key] for e in error_dict_list]) y = val[:,2] make_plot(x,y,c='b',label=key, abs=False) fig.autofmt_xdate() 
ax.xaxis.set_minor_locator(months) #ax.xaxis.set_major_locator(months_int) #ax.xaxis.set_major_formatter(date_fmt) ax.fmt_xdata = date_fmt ax.set_ylabel('Z offset (m)') fig, ax = plt.subplots(1) key = 'Translation vector magnitude (meters)' plt.title('ICP Translation vector magnitude (meters)') y = np.array([e[key] for e in error_dict_list]) make_plot(x,y,c='b',label=key, abs=True) fig.autofmt_xdate() ax.xaxis.set_minor_locator(months) #ax.xaxis.set_major_locator(months_int) #ax.xaxis.set_major_formatter(date_fmt) ax.fmt_xdata = date_fmt ax.set_ylabel('Offset (m)') fig, ax = plt.subplots(1) key = 'Number of errors' plt.title('Number of error samples') nerr = np.array([e[key] for e in error_dict_list]) make_plot(x,nerr,c='b',label=key) fig.autofmt_xdate() ax.xaxis.set_minor_locator(months) #ax.xaxis.set_major_locator(months_int) #ax.xaxis.set_major_formatter(date_fmt) ax.fmt_xdata = date_fmt ax.set_ylabel('N samples') fig, ax = plt.subplots(1) plt.title('ICP Mean Error') key = 'Input Mean Error' in_mean = np.array([e[key] for e in error_dict_list]) #make_plot(x,in_mean,c='r',label=key,yerr=in_std) make_plot(x,in_mean,c='r',label=key, abs=True) key = 'Output Mean Error' out_mean = np.array([e[key] for e in error_dict_list]) #make_plot(x,out_mean,c='b',label=key,yerr=out_std) make_plot(x,out_mean,c='b',label=key, abs=True) fig.autofmt_xdate() ax.xaxis.set_minor_locator(months) #ax.xaxis.set_major_locator(months_int) #ax.xaxis.set_major_formatter(date_fmt) ax.fmt_xdata = date_fmt ax.set_ylabel('Mean error (m)') plt.legend(scatterpoints=1, loc='upper left', prop={'size':8}) fig, ax = plt.subplots(1) plt.title('ICP Median Error') key = 'Input 16th Percentile Error' in_16p = np.array([e[key] for e in error_dict_list]) key = 'Input 84th Percentile Error' in_84p = np.array([e[key] for e in error_dict_list]) key = 'Input Median Error' in_med = np.array([e[key] for e in error_dict_list]) make_plot(x,in_med,c='r',label=key,yerr=[in_med - in_16p, in_84p - in_med], abs=True) key = 'Output 16th Percentile Error' out_16p = np.array([e[key] for e in error_dict_list]) key = 'Output 84th Percentile Error' out_84p = np.array([e[key] for e in error_dict_list]) key = 'Output Median Error' out_med = np.array([e[key] for e in error_dict_list]) make_plot(x,out_med,c='b',label=key,yerr=[out_med - out_16p, out_84p - out_med], abs=True) fig.autofmt_xdate() ax.fmt_xdata = mdates.DateFormatter(date_fmt) ax.xaxis.set_minor_locator(months) #ax.xaxis.set_major_locator(months_int) #ax.xaxis.set_major_formatter(date_fmt) ax.fmt_xdata = date_fmt ax.set_ylabel('Median error (m)') plt.legend(scatterpoints=1, loc='upper left', prop={'size':8}) fig_fn = 'icp_median_error.pdf' plt.savefig(fig_fn, dpi=600, bbox_inches='tight') fig, ax = plt.subplots(1) plt.title('Sampled Median Error') key = 'Input Sampled 16th Percentile Error' in_16p = np.ma.fix_invalid([e[key] for e in error_dict_list]) if in_16p.count() > 0: key = 'Input Sampled 84th Percentile Error' in_84p = np.ma.fix_invalid([e[key] for e in error_dict_list]) key = 'Input Sampled Median Error' in_med = np.ma.fix_invalid([e[key] for e in error_dict_list]) in_spread = in_84p - in_16p make_plot(x,in_med,c='r',label=key,yerr=[in_med - in_16p, in_84p - in_med], abs=True) key = 'Output Sampled 16th Percentile Error' out_16p = np.ma.fix_invalid([e[key] for e in error_dict_list]) key = 'Output Sampled 84th Percentile Error' out_84p = np.ma.fix_invalid([e[key] for e in error_dict_list]) key = 'Output Sampled Median Error' out_med = np.ma.fix_invalid([e[key] for e in 
error_dict_list]) out_spread = out_84p - out_16p p = 90.0 out_med_thresh = np.percentile(out_med, p) out_spread_thresh = np.percentile(out_spread, p) #print "Outliers with horiz error >= %0.2f (%0.1f%%)" % (p_thresh, p) print print "Sampled Error Outliers:" #idx = (h_mag >= p_thresh).nonzero()[0] idx = (out_med >= out_med_thresh).nonzero()[0] idx = np.unique(np.hstack([idx, ((out_spread >= out_spread_thresh).nonzero()[0])])) #Print all idx = np.arange(out_med.size) idx_sort = np.argsort(out_med[idx]) idx = idx[idx_sort] print 'name, samp_mederrr, samp_errspread, nerr' for i in idx: print error_dict_list[i]['File'], out_med[i], out_spread[i], nerr[i] #Delete from list if remove_outliers: print "Removing from calculation" del error_dict_list[i] print print 'Input sampled median error (spread/2): %0.2f (%0.2f)' % (np.median(in_med), np.median(in_spread)/2.) print 'Output sampled median error (spread/2): %0.2f (%0.2f)' % (np.median(out_med), np.median(out_spread)/2.) print make_plot(x,out_med,c='b',label=key,yerr=[out_med - out_16p, out_84p - out_med], abs=True) fig.autofmt_xdate() ax.set_ylabel('Median error (m)') ax.fmt_xdata = mdates.DateFormatter(date_fmt) ax.xaxis.set_minor_locator(months) #ax.xaxis.set_major_locator(months_int) #ax.xaxis.set_major_formatter(date_fmt) ax.fmt_xdata = date_fmt ax.set_ylabel('Median error (m)') plt.legend(scatterpoints=1, loc='upper left', prop={'size':8}) ax.set_ylim(-15,15) fig_fn = 'sampled_median_error.pdf' #fig_fn = 'sampled_median_error_2014-2016.pdf' #from datetime import datetime #ax.set_xlim(datetime(2014,1,1),datetime(2016,7,1)) plt.savefig(fig_fn, dpi=600, bbox_inches='tight')
[ "\n #ECEF translations\n #key = 'Translation vector (ECEF meters)'\n key = 'Translation vector (Cartesian, meters)'\n #key = 'Translation vector (meters)'\n val = np.array([e[key] for e in error_dict_list])\n #make_plot3d(val[:,0], val[:,1], val[:,2], title=key)\n ce90 = geolib.CE90(val[:,0], val[:,1])\n le90 = geolib.LE90(val[:,2])\n print\n print key\n print \"CE90:\", ce90 \n print \"LE90:\", le90 \n print\n\n #Proj translation\n key = 'Translation vector (Proj meters)' \n val = np.array([e[key] for e in error_dict_list])\n ce90 = geolib.CE90(val[:,0], val[:,1])\n le90 = geolib.LE90(val[:,2])\n print\n print key\n print \"CE90:\", ce90 \n print \"LE90:\", le90 \n print\n print 'Centroid (mean) of offsets (Proj meters): ', np.mean(val, axis=0) \n print 'Centroid (median) of offsets (Proj meters): ', np.median(val, axis=0) \n ", "\n fig, ax = plt.subplots(1)\n plt.title('ICP Standard Deviation')\n key = 'Input Std Error'\n in_std = np.array([e[key] for e in error_dict_list])\n make_plot(x,in_std,c='r',label=key)\n key = 'Output Std Error'\n out_std = np.array([e[key] for e in error_dict_list])\n make_plot(x,out_std,c='b',label=key)\n fig.autofmt_xdate()\n ax.xaxis.set_minor_locator(months)\n #ax.xaxis.set_major_locator(months_int)\n #ax.xaxis.set_major_formatter(date_fmt)\n ax.fmt_xdata = date_fmt\n plt.legend(scatterpoints=1)\n " ]
Please provide a description of the function:
def _value_length(self, value, t):
    if isinstance(value, int):
        fmt = '<%s' % (type_codes[t])
        output = struct.pack(fmt, value)
        return len(output)
    elif isinstance(value, str):
        return len(value) + 1  # Account for final 0

    len_accum = 0
    for x in value:
        len_accum += self._value_length(x, t)

    return len_accum
[ "Given an integer or list of them, convert it to an array of bytes." ]
Please provide a description of the function:
def _parse_line(self, line_no, line):
    try:
        matched = statement.parseString(line)
    except ParseException as exc:
        raise DataError("Error parsing line in TileBus file", line_number=line_no, column=exc.col, contents=line)

    if 'symbol' in matched:
        self._parse_cmd(matched)
    elif 'filename' in matched:
        self._parse_include(matched)
    elif 'variable' in matched:
        self._parse_assignment(matched)
    elif 'configvar' in matched:
        self._parse_configvar(matched)
[ "Parse a line in a TileBus file\n\n Args:\n line_no (int): The line number for printing useful error messages\n line (string): The line that we are trying to parse\n " ]
Please provide a description of the function:
def _validate_information(self):
    needed_variables = ["ModuleName", "ModuleVersion", "APIVersion"]

    for var in needed_variables:
        if var not in self.variables:
            raise DataError("Needed variable was not defined in mib file.", variable=var)

    # Make sure ModuleName is <= 6 characters
    if len(self.variables["ModuleName"]) > 6:
        raise DataError("ModuleName too long, must be 6 or fewer characters.", module_name=self.variables["ModuleName"])

    if not isinstance(self.variables["ModuleVersion"], str):
        raise ValueError("ModuleVersion ('%s') must be a string of the form X.Y.Z" % str(self.variables['ModuleVersion']))

    if not isinstance(self.variables["APIVersion"], str):
        raise ValueError("APIVersion ('%s') must be a string of the form X.Y" % str(self.variables['APIVersion']))

    self.variables['ModuleVersion'] = self._convert_module_version(self.variables["ModuleVersion"])
    self.variables['APIVersion'] = self._convert_api_version(self.variables["APIVersion"])
    self.variables["ModuleName"] = self.variables["ModuleName"].ljust(6)

    self.valid = True
[ "Validate that all information has been filled in" ]
Please provide a description of the function:
def get_block(self, config_only=False):
    mib = TBBlock()

    for cid, config in self.configs.items():
        mib.add_config(cid, config)

    if not config_only:
        for key, val in self.commands.items():
            mib.add_command(key, val)

    if not self.valid:
        self._validate_information()

    mib.set_api_version(*self.variables["APIVersion"])
    mib.set_module_version(*self.variables["ModuleVersion"])
    mib.set_name(self.variables["ModuleName"])

    return mib
[ "Create a TileBus Block based on the information in this descriptor" ]
Please provide a description of the function:
def add_adapter(self, adapter):
    if self._started:
        raise InternalError("New adapters cannot be added after start() is called")

    if isinstance(adapter, DeviceAdapter):
        self._logger.warning("Wrapping legacy device adapter %s in async wrapper", adapter)
        adapter = AsynchronousModernWrapper(adapter, loop=self._loop)

    self.adapters.append(adapter)

    adapter_callback = functools.partial(self.handle_adapter_event, len(self.adapters) - 1)
    events = ['device_seen', 'broadcast', 'report', 'connection', 'disconnection', 'trace', 'progress']
    adapter.register_monitor([None], events, adapter_callback)
[ "Add a device adapter to this aggregating adapter." ]
Please provide a description of the function:
def get_config(self, name, default=_MISSING):
    val = self._config.get(name, default)
    if val is _MISSING:
        raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name))

    return val
[ "Get a configuration setting from this DeviceAdapter.\n\n See :meth:`AbstractDeviceAdapter.get_config`.\n " ]
Please provide a description of the function:
async def start(self):
    successful = 0

    try:
        for adapter in self.adapters:
            await adapter.start()
            successful += 1

        self._started = True
    except:
        for adapter in self.adapters[:successful]:
            await adapter.stop()

        raise
[ "Start all adapters managed by this device adapter.\n\n If there is an error starting one or more adapters, this method will\n stop any adapters that we successfully started and raise an exception.\n " ]
Please provide a description of the function:
def visible_devices(self):
    devs = {}

    for device_id, adapters in self._devices.items():
        dev = None
        max_signal = None
        best_adapter = None

        for adapter_id, devinfo in adapters.items():
            connstring = "adapter/{0}/{1}".format(adapter_id, devinfo['connection_string'])
            if dev is None:
                dev = copy.deepcopy(devinfo)
                del dev['connection_string']
                if 'adapters' not in dev:
                    dev['adapters'] = []
                best_adapter = adapter_id

            dev['adapters'].append((adapter_id, devinfo['signal_strength'], connstring))

            if max_signal is None:
                max_signal = devinfo['signal_strength']
            elif devinfo['signal_strength'] > max_signal:
                max_signal = devinfo['signal_strength']
                best_adapter = adapter_id

        # If device has been seen in no adapters, it will get expired
        # don't return it
        if dev is None:
            continue

        dev['connection_string'] = "device/%x" % dev['uuid']
        dev['adapters'] = sorted(dev['adapters'], key=lambda x: x[1], reverse=True)
        dev['best_adapter'] = best_adapter
        dev['signal_strength'] = max_signal

        devs[device_id] = dev

    return devs
[ "Unify all visible devices across all connected adapters\n\n Returns:\n dict: A dictionary mapping UUIDs to device information dictionaries\n " ]
Please provide a description of the function:
async def connect(self, conn_id, connection_string):
    if connection_string.startswith('device/'):
        adapter_id, local_conn = self._find_best_adapter(connection_string, conn_id)
        translate_conn = True
    elif connection_string.startswith('adapter/'):
        adapter_str, _, local_conn = connection_string[8:].partition('/')
        adapter_id = int(adapter_str)
        translate_conn = False
    else:
        raise DeviceAdapterError(conn_id, 'connect', 'invalid connection string format')

    if self.adapters[adapter_id].can_connect() is False:
        raise DeviceAdapterError(conn_id, 'connect', 'chosen adapter cannot handle another connection')

    # Make sure to set up the connection information before
    # so there are no races with events coming soon after connect.
    self._setup_connection(conn_id, local_conn)
    self._track_property(conn_id, 'adapter', adapter_id)
    self._track_property(conn_id, 'translate', translate_conn)

    try:
        await self.adapters[adapter_id].connect(conn_id, local_conn)
    except:
        self._teardown_connection(conn_id)
        raise
[ "Connect to a device.\n\n See :meth:`AbstractDeviceAdapter.connect`.\n " ]
Please provide a description of the function:
async def disconnect(self, conn_id):
    adapter_id = self._get_property(conn_id, 'adapter')
    await self.adapters[adapter_id].disconnect(conn_id)
    self._teardown_connection(conn_id)
[ "Disconnect from a connected device.\n\n See :meth:`AbstractDeviceAdapter.disconnect`.\n " ]
Please provide a description of the function:
async def open_interface(self, conn_id, interface):
    adapter_id = self._get_property(conn_id, 'adapter')
    await self.adapters[adapter_id].open_interface(conn_id, interface)
[ "Open an interface on an IOTile device.\n\n See :meth:`AbstractDeviceAdapter.open_interface`.\n " ]
Please provide a description of the function:
async def close_interface(self, conn_id, interface):
    adapter_id = self._get_property(conn_id, 'adapter')
    await self.adapters[adapter_id].close_interface(conn_id, interface)
[ "Close an interface on this IOTile device.\n\n See :meth:`AbstractDeviceAdapter.close_interface`.\n " ]
Please provide a description of the function:
async def probe(self):
    for adapter in self.adapters:
        if adapter.get_config('probe_supported', False):
            await adapter.probe()
[ "Probe for devices.\n\n This method will probe all adapters that can probe and will send a\n notification for all devices that we have seen from all adapters.\n\n See :meth:`AbstractDeviceAdapter.probe`.\n " ]
Please provide a description of the function:async def send_rpc(self, conn_id, address, rpc_id, payload, timeout): adapter_id = self._get_property(conn_id, 'adapter') return await self.adapters[adapter_id].send_rpc(conn_id, address, rpc_id, payload, timeout)
[ "Send an RPC to a device.\n\n See :meth:`AbstractDeviceAdapter.send_rpc`.\n " ]
Please provide a description of the function:async def debug(self, conn_id, name, cmd_args): adapter_id = self._get_property(conn_id, 'adapter') return await self.adapters[adapter_id].debug(conn_id, name, cmd_args)
[ "Send a debug command to a device.\n\n See :meth:`AbstractDeviceAdapter.debug`.\n " ]
Please provide a description of the function:async def send_script(self, conn_id, data): adapter_id = self._get_property(conn_id, 'adapter') return await self.adapters[adapter_id].send_script(conn_id, data)
[ "Send a script to a device.\n\n See :meth:`AbstractDeviceAdapter.send_script`.\n " ]
Please provide a description of the function:async def handle_adapter_event(self, adapter_id, conn_string, conn_id, name, event): if name == 'device_seen': self._track_device_seen(adapter_id, conn_string, event) event = self._translate_device_seen(adapter_id, conn_string, event) conn_string = self._translate_conn_string(adapter_id, conn_string) elif conn_id is not None and self._get_property(conn_id, 'translate'): conn_string = self._translate_conn_string(adapter_id, conn_string) else: conn_string = "adapter/%d/%s" % (adapter_id, conn_string) await self.notify_event(conn_string, name, event)
[ "Handle an event received from an adapter." ]
Please provide a description of the function:def _device_expiry_callback(self): expired = 0 for adapters in self._devices.values(): to_remove = [] now = monotonic() for adapter_id, dev in adapters.items(): if 'expires' not in dev: continue if now > dev['expires']: to_remove.append(adapter_id) local_conn = "adapter/%d/%s" % (adapter_id, dev['connection_string']) if local_conn in self._conn_strings: del self._conn_strings[local_conn] for entry in to_remove: del adapters[entry] expired += 1 if expired > 0: self._logger.info('Expired %d devices', expired)
[ "Periodic callback to remove expired devices from visible_devices." ]
Please provide a description of the function:def PathIsDir(self, key, val, env): if not os.path.isdir(val): if os.path.isfile(val): m = 'Directory path for option %s is a file: %s' else: m = 'Directory path for option %s does not exist: %s' raise SCons.Errors.UserError(m % (key, val))
[ "Validator to check if Path is a directory." ]
Please provide a description of the function:def PathExists(self, key, val, env): if not os.path.exists(val): m = 'Path for option %s does not exist: %s' raise SCons.Errors.UserError(m % (key, val))
[ "Validator to check if Path exists." ]
Please provide a description of the function:async def process_graph_input(graph, stream, value, rpc_executor): graph.sensor_log.push(stream, value) # FIXME: This should be specified in our device model if stream.important: associated_output = stream.associated_stream() graph.sensor_log.push(associated_output, value) to_check = deque([x for x in graph.roots]) while len(to_check) > 0: node = to_check.popleft() if node.triggered(): try: results = node.process(rpc_executor, graph.mark_streamer) for result in results: if inspect.iscoroutine(result.value): result.value = await asyncio.ensure_future(result.value) result.raw_time = value.raw_time graph.sensor_log.push(node.stream, result) except: logging.getLogger(__name__).exception("Unhandled exception in graph node processing function for node %s", str(node)) # If we generated any outputs, notify our downstream nodes # so that they are also checked to see if they should run. if len(results) > 0: to_check.extend(node.outputs)
[ "Process an input through this sensor graph.\n\n The tick information in value should be correct and is transferred\n to all results produced by nodes acting on this tick. This coroutine\n is an asyncio-compatible version of SensorGraph.process_input()\n\n Args:\n stream (DataStream): The stream the input is part of\n value (IOTileReading): The value to process\n rpc_executor (RPCExecutor): An object capable of executing RPCs\n in case we need to do that.\n " ]
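A sketch of driving the coroutine, assuming the iotile-style `DataStream` and `IOTileReading` helpers used elsewhere in this document and a pre-built `graph` and `rpc_executor`; the stream name is arbitrary.

import asyncio

async def feed_one_input(graph, rpc_executor):
    stream = DataStream.FromString('input 1')          # assumed stream name
    reading = IOTileReading(0, stream.encode(), 42)    # raw_time=0, value=42
    await process_graph_input(graph, stream, reading, rpc_executor)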
Please provide a description of the function:def clear_to_reset(self, config_vars): super(SensorGraphSubsystem, self).clear_to_reset(config_vars) self.graph.clear() if not self.persisted_exists: return for node in self.persisted_nodes: self.graph.add_node(node) for streamer_desc in self.persisted_streamers: streamer = streamer_descriptor.parse_string_descriptor(streamer_desc) self.graph.add_streamer(streamer) # Load in the constants for stream, reading in self.persisted_constants: self._sensor_log.push(stream, reading) self.enabled = True # Set up all streamers for index, value in self.streamer_acks.items(): self._seek_streamer(index, value)
[ "Clear all volatile information across a reset.\n\n The reset behavior is that:\n - any persisted sensor_graph is loaded\n - if there is a persisted graph found, enabled is set to True\n - if there is a persisted graph found, reset readings are pushed\n into it.\n " ]
Please provide a description of the function:def process_input(self, encoded_stream, value): if not self.enabled: return if isinstance(encoded_stream, str): stream = DataStream.FromString(encoded_stream) encoded_stream = stream.encode() elif isinstance(encoded_stream, DataStream): stream = encoded_stream encoded_stream = stream.encode() else: stream = DataStream.FromEncoded(encoded_stream) reading = IOTileReading(self.get_timestamp(), encoded_stream, value) self._inputs.put_nowait((stream, reading))
[ "Process or drop a graph input.\n\n This method asynchronously queues an item to be processed by the\n sensorgraph worker task in _reset_vector. It must be called from\n inside the emulation loop and returns immediately, before the input is\n processed.\n " ]
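As the normalization above shows, the stream may be passed in any of three encodings. A sketch, with `sg` standing in for the sensor-graph subsystem instance and the values chosen arbitrarily:

sg.process_input('system input 1024', 1)                # string descriptor
sg.process_input(DataStream.FromString('input 1'), 2)   # DataStream object
sg.process_input(0x5801, 3)                             # already-encoded id (made-up value)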
Please provide a description of the function:def _seek_streamer(self, index, value): highest_id = self._rsl.highest_stored_id() streamer = self.graph.streamers[index] if not streamer.walker.buffered: return _pack_sgerror(SensorLogError.CANNOT_USE_UNBUFFERED_STREAM) find_type = None try: exact = streamer.walker.seek(value, target='id') if exact: find_type = 'exact' else: find_type = 'other_stream' except UnresolvedIdentifierError: if value > highest_id: find_type = 'too_high' else: find_type = 'too_low' # If we found an exact match, move one beyond it if find_type == 'exact': try: streamer.walker.pop() except StreamEmptyError: pass error = Error.NO_ERROR elif find_type == 'too_high': streamer.walker.skip_all() error = _pack_sgerror(SensorLogError.NO_MORE_READINGS) elif find_type == 'too_low': streamer.walker.seek(0, target='offset') error = _pack_sgerror(SensorLogError.NO_MORE_READINGS) else: error = _pack_sgerror(SensorLogError.ID_FOUND_FOR_ANOTHER_STREAM) return error
[ "Complex logic for actually seeking a streamer to a reading_id.\n\n This routine hides all of the gnarly logic of the various edge cases.\n In particular, the behavior depends on whether the reading id is found,\n and if it is found, whether it belongs to the indicated streamer or not.\n\n If not, the behavior depends on whether the sought reading is too high\n or too low.\n " ]
Please provide a description of the function:def acknowledge_streamer(self, index, ack, force): if index >= len(self.graph.streamers): return _pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED) old_ack = self.streamer_acks.get(index, 0) if ack != 0: if ack <= old_ack and not force: return _pack_sgerror(SensorGraphError.OLD_ACKNOWLEDGE_UPDATE) self.streamer_acks[index] = ack current_ack = self.streamer_acks.get(index, 0) return self._seek_streamer(index, current_ack)
[ "Acknowledge a streamer value as received from the remote side." ]
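A behavioral sketch of the acknowledgement rules enforced above; `sg` is assumed to be the subsystem instance and streamer 0 to be allocated.

sg.acknowledge_streamer(0, 10, False)   # ack advances from 0 to 10
sg.acknowledge_streamer(0, 5, False)    # rejected with OLD_ACKNOWLEDGE_UPDATE
sg.acknowledge_streamer(0, 5, True)     # force=True rolls the ack back to 5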
Please provide a description of the function:def _handle_streamer_finished(self, index, succeeded, highest_ack): self._logger.debug("Rolling back streamer %d after streaming, highest ack from streaming subsystem was %d", index, highest_ack) self.acknowledge_streamer(index, highest_ack, False)
[ "Callback when a streamer finishes processing." ]
Please provide a description of the function:def process_streamers(self): # Check for any triggered streamers and pass them to stream manager in_progress = self._stream_manager.in_progress() triggered = self.graph.check_streamers(blacklist=in_progress) for streamer in triggered: self._stream_manager.process_streamer(streamer, callback=self._handle_streamer_finished)
[ "Check if any streamers should be handed to the stream manager." ]
Please provide a description of the function:def trigger_streamer(self, index): self._logger.debug("trigger_streamer RPC called on streamer %d", index) if index >= len(self.graph.streamers): return _pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED) if index in self._stream_manager.in_progress(): return _pack_sgerror(SensorGraphError.STREAM_ALREADY_IN_PROGRESS) streamer = self.graph.streamers[index] if not streamer.triggered(manual=True): return _pack_sgerror(SensorGraphError.STREAMER_HAS_NO_NEW_DATA) self._logger.debug("calling mark_streamer on streamer %d from trigger_streamer RPC", index) self.graph.mark_streamer(index) self.process_streamers() return Error.NO_ERROR
[ "Pass a streamer to the stream manager if it has data." ]
Please provide a description of the function:def persist(self): self.persisted_nodes = self.graph.dump_nodes() self.persisted_streamers = self.graph.dump_streamers() self.persisted_exists = True self.persisted_constants = self._sensor_log.dump_constants()
[ "Trigger saving the current sensorgraph to persistent storage." ]
Please provide a description of the function:def reset(self): self.persisted_exists = False self.persisted_nodes = [] self.persisted_streamers = [] self.persisted_constants = [] self.graph.clear() self.streamer_status = {}
[ "Clear the sensorgraph from RAM and flash." ]
Please provide a description of the function:def add_node(self, binary_descriptor): try: node_string = parse_binary_descriptor(binary_descriptor) except: self._logger.exception("Error parsing binary node descriptor: %s", binary_descriptor) return _pack_sgerror(SensorGraphError.INVALID_NODE_STREAM) # FIXME: Actually provide the correct error codes here try: self.graph.add_node(node_string) except NodeConnectionError: return _pack_sgerror(SensorGraphError.STREAM_NOT_IN_USE) except ProcessingFunctionError: return _pack_sgerror(SensorGraphError.INVALID_PROCESSING_FUNCTION) except ResourceUsageError: return _pack_sgerror(SensorGraphError.NO_NODE_SPACE_AVAILABLE) return Error.NO_ERROR
[ "Add a node to the sensor_graph using a binary node descriptor.\n\n Args:\n binary_descriptor (bytes): An encoded binary node descriptor.\n\n Returns:\n int: A packed error code.\n " ]
Please provide a description of the function:def add_streamer(self, binary_descriptor): streamer = streamer_descriptor.parse_binary_descriptor(binary_descriptor) try: self.graph.add_streamer(streamer) self.streamer_status[len(self.graph.streamers) - 1] = StreamerStatus() return Error.NO_ERROR except ResourceUsageError: return _pack_sgerror(SensorGraphError.NO_MORE_STREAMER_RESOURCES)
[ "Add a streamer to the sensor_graph using a binary streamer descriptor.\n\n Args:\n binary_descriptor (bytes): An encoded binary streamer descriptor.\n\n Returns:\n int: A packed error code\n " ]
Please provide a description of the function:def inspect_streamer(self, index): if index >= len(self.graph.streamers): return [_pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED), b'\0'*14] return [Error.NO_ERROR, streamer_descriptor.create_binary_descriptor(self.graph.streamers[index])]
[ "Inspect the streamer at the given index." ]
Please provide a description of the function:def inspect_node(self, index): if index >= len(self.graph.nodes): raise RPCErrorCode(6) #FIXME: use actual error code here for UNKNOWN_ERROR status return create_binary_descriptor(str(self.graph.nodes[index]))
[ "Inspect the graph node at the given index." ]
Please provide a description of the function:def query_streamer(self, index): if index >= len(self.graph.streamers): return None info = self.streamer_status[index] highest_ack = self.streamer_acks.get(index, 0) return [info.last_attempt_time, info.last_success_time, info.last_error, highest_ack, info.last_status, info.attempt_number, info.comm_status]
[ "Query the status of the streamer at the given index." ]
Please provide a description of the function:def sg_set_online(self, online): self.sensor_graph.enabled = bool(online) return [Error.NO_ERROR]
[ "Set the sensor-graph online/offline." ]
Please provide a description of the function:def sg_graph_input(self, value, stream_id): self.sensor_graph.process_input(stream_id, value) return [Error.NO_ERROR]
[ "Present a graph input to the sensor_graph subsystem." ]
Please provide a description of the function:def sg_add_streamer(self, desc): if len(desc) == 13: desc += b'\0' err = self.sensor_graph.add_streamer(desc) return [err]
[ "Add a graph streamer using a binary descriptor." ]
Please provide a description of the function:def sg_seek_streamer(self, index, force, value): force = bool(force) err = self.sensor_graph.acknowledge_streamer(index, value, force) return [err]
[ "Acknowledge a streamer." ]
Please provide a description of the function:def sg_query_streamer(self, index): resp = self.sensor_graph.query_streamer(index) if resp is None: return [struct.pack("<L", _pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED))] return [struct.pack("<LLLLBBBx", *resp)]
[ "Query the current status of a streamer." ]
Please provide a description of the function:def dispatch(self, value, callback=None):
    done = None

    if callback is None:
        done = threading.Event()
        shared_data = [None, None]

        def _callback(exc_info, return_value):
            shared_data[0] = exc_info
            shared_data[1] = return_value
            done.set()

        callback = _callback

    workitem = WorkItem(value, callback)
    self._work_queue.put(workitem)

    if done is None:
        return None

    done.wait()

    exc_info, return_value = shared_data
    if exc_info is not None:
        self.future_raise(*exc_info)

    return return_value
[ "Dispatch an item to the workqueue and optionally wait.\n\n This is the only way to add work to the background work queue. Unless\n you also pass a callback object, this method will synchronously wait\n for the work to finish and return the result. If the work raises an\n exception, the exception will be reraised in this method.\n\n If you pass an optional callback(exc_info, return_value), this method\n will not block and instead your callback will be called when the work\n finishes. If an exception was raised during processing, exc_info will\n be set with the contents of sys.exc_info(). Otherwise, exc_info will\n be None and whatever the work_queue handler returned will be passed as\n the return_value parameter to the supplied callback.\n\n Args:\n value (object): Arbitrary object that will be passed to the work\n queue handler.\n\n callback (callable): Optional callback to receive the result of\n the work queue when it finishes. If not passed, this method\n will be synchronous and return the result from the dispatch()\n method itself\n\n Returns:\n object: The result of the work_queue handler function or None.\n\n If callback is not None, then this method will return immediately\n with a None return value. Otherwise it will block until the work\n item is finished (including any work items ahead in the queue) and\n return whatever the work item handler returned.\n " ]
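A usage sketch of both calling styles. The constructor arguments and the handler that doubles its input are assumptions made purely for illustration; only the dispatch() semantics follow the code above.

queue = WorkQueueThread(handler=lambda value: value * 2)   # constructor args assumed
queue.start()

print(queue.dispatch(21))            # blocks until processed, prints 42

def on_done(exc_info, return_value):
    # runs on the worker thread once the item is processed
    if exc_info is None:
        print("async result:", return_value)

queue.dispatch(21, callback=on_done)  # returns None immediately
queue.stop()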
Please provide a description of the function:def future_raise(self, tp, value=None, tb=None): if value is not None and isinstance(tp, Exception): raise TypeError("instance exception may not have a separate value") if value is not None: exc = tp(value) else: exc = tp if exc.__traceback__ is not tb: raise exc.with_traceback(tb) raise exc
[ "raise_ implementation from future.utils" ]
Please provide a description of the function:def flush(self): done = threading.Event() def _callback(): done.set() self.defer(_callback) done.wait()
[ "Synchronously wait until this work item is processed.\n\n This has the effect of waiting until all work items queued before this\n method has been called have finished.\n " ]
Please provide a description of the function:def wait_until_idle(self): done = threading.Event() def _callback(): done.set() self.defer_until_idle(_callback) done.wait()
[ "Block the calling thread until the work queue is (temporarily) empty.\n\n See the detailed discussion under defer_until_idle() for restrictions\n and expected use cases for this method.\n\n This routine will block the calling thread.\n " ]
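The difference between the two synchronization helpers, as a sketch; `queue` is again assumed to be a running WorkQueueThread.

queue.dispatch("job", callback=lambda exc, ret: None)   # fire and forget
queue.flush()             # everything queued before this line has finished
queue.wait_until_idle()   # the queue was observed empty at least once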
Please provide a description of the function:def direct_dispatch(self, arg, callback): try: self._current_callbacks.appendleft(callback) exc_info = None retval = None retval = self._routine(arg) except: # pylint:disable=bare-except;We need to capture the exception and feed it back to the caller exc_info = sys.exc_info() finally: self._current_callbacks.popleft() if callback is not None and retval is not self.STILL_PENDING: callback(exc_info, retval) return retval, exc_info
[ "Directly dispatch a work item.\n\n This method MUST only be called from inside of another work item and\n will synchronously invoke the work item as if it was passed to\n dispatch(). Calling this method from any other thread has undefined\n consequences since it will be unsynchronized with respect to items\n dispatched from inside the background work queue itself.\n " ]
Please provide a description of the function:def run(self): idle_watchers = [] while True: try: if self._work_queue.empty() and len(idle_watchers) > 0: for watcher in idle_watchers: try: watcher() except: # pylint:disable=bare-except;We can't let one idle watcher failure impact any other watcher self._logger.exception("Error inside queue idle watcher") idle_watchers = [] item = self._work_queue.get() # Handle special actions that are not RPCs if item is STOP_WORKER_ITEM: return elif isinstance(item, MarkLocationItem): item.callback() continue elif isinstance(item, WaitIdleItem): idle_watchers.append(item.callback) continue elif not isinstance(item, WorkItem): self._logger.error("Invalid item passed to WorkQueueThread: %s, ignoring", item) continue self.direct_dispatch(item.arg, item.callback) except: # pylint:disable=bare-except;We cannot let this background thread die until we are told to stop() self._logger.exception("Error inside background workqueue thread")
[ "The target routine called to start thread activity." ]
Please provide a description of the function:def stop(self, timeout=None, force=False): self.signal_stop() self.wait_stopped(timeout, force)
[ "Stop the worker thread and synchronously wait for it to finish.\n\n Args:\n timeout (float): The maximum time to wait for the thread to stop\n before raising a TimeoutExpiredError. If force is True, TimeoutExpiredError\n is not raised and the thread is just marked as a daemon thread\n so that it does not block cleanly exiting the process.\n force (bool): If true and the thread does not exit in timeout seconds\n no error is raised since the thread is marked as daemon and will\n be killed when the process exits.\n " ]
Please provide a description of the function:def wait_stopped(self, timeout=None, force=False): self.join(timeout) if self.is_alive() and force is False: raise TimeoutExpiredError("Error waiting for background thread to exit", timeout=timeout)
[ "Wait for the thread to stop.\n\n You must have previously called signal_stop or this function will\n hang.\n\n Args:\n\n timeout (float): The maximum time to wait for the thread to stop\n before raising a TimeoutExpiredError. If force is True,\n TimeoutExpiredError is not raised and the thread is just\n marked as a daemon thread so that it does not block cleanly\n exiting the process.\n force (bool): If true and the thread does not exit in timeout seconds\n no error is raised since the thread is marked as daemon and will\n be killed when the process exits.\n " ]
Please provide a description of the function:def generate(env): if not exists(env): return env['WIXCANDLEFLAGS'] = ['-nologo'] env['WIXCANDLEINCLUDE'] = [] env['WIXCANDLECOM'] = '$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}' env['WIXLIGHTFLAGS'].append( '-nologo' ) env['WIXLIGHTCOM'] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}" env['WIXSRCSUF'] = '.wxs' env['WIXOBJSUF'] = '.wixobj' object_builder = SCons.Builder.Builder( action = '$WIXCANDLECOM', suffix = '$WIXOBJSUF', src_suffix = '$WIXSRCSUF') linker_builder = SCons.Builder.Builder( action = '$WIXLIGHTCOM', src_suffix = '$WIXOBJSUF', src_builder = object_builder) env['BUILDERS']['WiX'] = linker_builder
[ "Add Builders and construction variables for WiX to an Environment." ]
Please provide a description of the function:def FromString(cls, desc): if language.stream is None: language.get_language() parse_exp = Optional(time_interval('time') - Literal(':').suppress()) - language.stream('stream') - Literal('=').suppress() - number('value') try: data = parse_exp.parseString(desc) time = 0 if 'time' in data: time = data['time'][0] return SimulationStimulus(time, data['stream'][0], data['value']) except (ParseException, ParseSyntaxException): raise ArgumentError("Could not parse stimulus descriptor", descriptor=desc)
[ "Create a new stimulus from a description string.\n\n The string must have the format:\n\n [time: ][system ]input X = Y\n where X and Y are integers. The time, if given, must\n be a time_interval, which is an integer followed by a\n time unit such as second(s), minute(s), etc.\n\n Args:\n desc (str): A string description of the stimulus.\n\n Returns:\n SimulationStimulus: The parsed stimulus object.\n " ]
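Example descriptors accepted by the grammar above; the stream names follow the sensor-graph DSL and the field layout of the returned object is an assumption.

stim = SimulationStimulus.FromString('input 1 = 5')
# time defaults to 0 when no interval prefix is given
delayed = SimulationStimulus.FromString('10 seconds: system input 1024 = 1')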
Please provide a description of the function:def get_connection_id(self, conn_or_int_id): key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key) try: data = table[key] except KeyError: raise ArgumentError("Could not find connection by id", id=key) return data['conn_id']
[ "Get the connection id.\n\n Args:\n conn_or_int_id (int, string): The external integer connection id or\n an internal string connection id\n\n Returns:\n int: The external connection id associated with that connection.\n\n Raises:\n ArgumentError: When the key is not found in the list of active connections\n or is invalid.\n " ]
Please provide a description of the function:def _get_connection(self, conn_or_int_id): key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: return None try: data = table[key] except KeyError: return None return data
[ "Get the data for a connection by either conn_id or internal_id\n\n Args:\n conn_or_int_id (int, string): The external integer connection id or\n an internal string connection id\n\n Returns:\n dict: The context data associated with that connection or None if it cannot\n be found.\n\n Raises:\n ArgumentError: When the key is not found in the list of active connections\n or is invalid.\n " ]
Please provide a description of the function:def _get_connection_state(self, conn_or_int_id): key = conn_or_int_id if isinstance(key, str): table = self._int_connections elif isinstance(key, int): table = self._connections else: raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key) if key not in table: return self.Disconnected data = table[key] return data['state']
[ "Get a connection's state by either conn_id or internal_id\n\n This routine must only be called from the internal worker thread.\n\n Args:\n conn_or_int_id (int, string): The external integer connection id or\n an internal string connection id\n " ]
Please provide a description of the function:def _check_timeouts(self): for conn_id, data in self._connections.items(): if 'timeout' in data and data['timeout'].expired: if data['state'] == self.Connecting: self.finish_connection(conn_id, False, 'Connection attempt timed out') elif data['state'] == self.Disconnecting: self.finish_disconnection(conn_id, False, 'Disconnection attempt timed out') elif data['state'] == self.InProgress: if data['microstate'] == 'rpc': self.finish_operation(conn_id, False, 'RPC timed out without response', None, None) elif data['microstate'] == 'open_interface': self.finish_operation(conn_id, False, 'Open interface request timed out')
[ "Check if any operations in progress need to be timed out\n\n Adds the corresponding finish action that fails the request due to a\n timeout.\n " ]
Please provide a description of the function:def _begin_connection_action(self, action): conn_id = action.data['connection_id'] int_id = action.data['internal_id'] callback = action.data['callback'] # Make sure we are not reusing an id that is currently connected to something if self._get_connection_state(conn_id) != self.Disconnected: print(self._connections[conn_id]) callback(conn_id, self.id, False, 'Connection ID is already in use for another connection') return if self._get_connection_state(int_id) != self.Disconnected: callback(conn_id, self.id, False, 'Internal ID is already in use for another connection') return conn_data = { 'state': self.Connecting, 'microstate': None, 'conn_id': conn_id, 'int_id': int_id, 'callback': callback, 'timeout': action.timeout, 'context': action.data['context'] } self._connections[conn_id] = conn_data self._int_connections[int_id] = conn_data
[ "Begin a connection attempt\n\n Args:\n action (ConnectionAction): the action object describing what we are\n connecting to\n " ]
Please provide a description of the function:def _finish_connection_action(self, action): success = action.data['success'] conn_key = action.data['id'] if self._get_connection_state(conn_key) != self.Connecting: print("Invalid finish_connection action on a connection whose state is not Connecting, conn_key=%s" % str(conn_key)) return # Cannot be None since we checked above to make sure it exists data = self._get_connection(conn_key) callback = data['callback'] conn_id = data['conn_id'] int_id = data['int_id'] if success is False: reason = action.data['reason'] if reason is None: reason = "No reason was given" del self._connections[conn_id] del self._int_connections[int_id] callback(conn_id, self.id, False, reason) else: data['state'] = self.Idle data['microstate'] = None data['callback'] = None callback(conn_id, self.id, True, None)
[ "Finish a connection attempt\n\n Args:\n action (ConnectionAction): the action object describing what we are\n connecting to and what the result of the operation was\n " ]
Please provide a description of the function:def unexpected_disconnect(self, conn_or_internal_id): data = { 'id': conn_or_internal_id } action = ConnectionAction('force_disconnect', data, sync=False) self._actions.put(action)
[ "Notify that there was an unexpected disconnection of the device.\n\n Any in progress operations are canceled cleanly and the device is transitioned\n to a disconnected state.\n\n Args:\n conn_or_internal_id (string, int): Either an integer connection id or a string\n internal_id\n " ]
Please provide a description of the function:def begin_disconnection(self, conn_or_internal_id, callback, timeout): data = { 'id': conn_or_internal_id, 'callback': callback } action = ConnectionAction('begin_disconnection', data, timeout=timeout, sync=False) self._actions.put(action)
[ "Begin a disconnection attempt\n\n Args:\n conn_or_internal_id (string, int): Either an integer connection id or a string\n internal_id\n callback (callable): Callback to call when this disconnection attempt either\n succeeds or fails\n timeout (float): How long to allow this connection attempt to proceed\n without timing it out (in seconds)\n " ]
Please provide a description of the function:def _force_disconnect_action(self, action): conn_key = action.data['id'] if self._get_connection_state(conn_key) == self.Disconnected: return data = self._get_connection(conn_key) # If there are any operations in progress, cancel them cleanly if data['state'] == self.Connecting: callback = data['callback'] callback(False, 'Unexpected disconnection') elif data['state'] == self.Disconnecting: callback = data['callback'] callback(True, None) elif data['state'] == self.InProgress: callback = data['callback'] if data['microstate'] == 'rpc': callback(False, 'Unexpected disconnection', None, None) elif data['microstate'] == 'open_interface': callback(False, 'Unexpected disconnection') elif data['microstate'] == 'close_interface': callback(False, 'Unexpected disconnection') int_id = data['int_id'] conn_id = data['conn_id'] del self._connections[conn_id] del self._int_connections[int_id]
[ "Forcibly disconnect a device.\n\n Args:\n action (ConnectionAction): the action object describing what we are\n forcibly disconnecting\n " ]
Please provide a description of the function:def _begin_disconnection_action(self, action): conn_key = action.data['id'] callback = action.data['callback'] if self._get_connection_state(conn_key) != self.Idle: callback(conn_key, self.id, False, 'Cannot start disconnection, connection is not idle') return # Cannot be None since we checked above to make sure it exists data = self._get_connection(conn_key) data['state'] = self.Disconnecting data['microstate'] = None data['callback'] = callback data['timeout'] = action.timeout
[ "Begin a disconnection attempt\n\n Args:\n action (ConnectionAction): the action object describing what we are\n connecting to and what the result of the operation was\n " ]
Please provide a description of the function:def _finish_disconnection_action(self, action): success = action.data['success'] conn_key = action.data['id'] if self._get_connection_state(conn_key) != self.Disconnecting: self._logger.error("Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s", str(conn_key)) return # Cannot be None since we checked above to make sure it exists data = self._get_connection(conn_key) callback = data['callback'] conn_id = data['conn_id'] int_id = data['int_id'] if success is False: reason = action.data['reason'] if reason is None: reason = "No reason was given" data['state'] = self.Idle data['microstate'] = None data['callback'] = None callback(conn_id, self.id, False, reason) else: del self._connections[conn_id] del self._int_connections[int_id] callback(conn_id, self.id, True, None)
[ "Finish a disconnection attempt\n\n There are two possible outcomes:\n - if we were successful at disconnecting, we transition to disconnected\n - if we failed at disconnecting, we transition back to idle\n\n Args:\n action (ConnectionAction): the action object describing what we are\n disconnecting from and what the result of the operation was\n " ]
Please provide a description of the function:def finish_operation(self, conn_or_internal_id, success, *args): data = { 'id': conn_or_internal_id, 'success': success, 'callback_args': args } action = ConnectionAction('finish_operation', data, sync=False) self._actions.put(action)
[ "Finish an operation on a connection.\n\n Args:\n conn_or_internal_id (string, int): Either an integer connection id or a string\n internal_id\n success (bool): Whether the operation was successful\n failure_reason (string): Optional reason why the operation failed\n result (dict): Optional dictionary containing the results of the operation\n " ]
Please provide a description of the function:def _finish_operation_action(self, action): success = action.data['success'] conn_key = action.data['id'] if self._get_connection_state(conn_key) != self.InProgress: self._logger.error("Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s", str(conn_key)) return # Cannot be None since we checked above to make sure it exists data = self._get_connection(conn_key) callback = data['callback'] conn_id = data['conn_id'] args = action.data['callback_args'] data['state'] = self.Idle data['microstate'] = None callback(conn_id, self.id, success, *args)
[ "Finish an attempted operation.\n\n Args:\n action (ConnectionAction): the action object describing the result\n of the operation that we are finishing\n " ]
Please provide a description of the function:def canonical_text(self, text): out = [] line_continues_a_comment = False for line in text.splitlines(): line,comment = self.comment_re.findall(line)[0] if line_continues_a_comment == True: out[-1] = out[-1] + line.lstrip() else: out.append(line) line_continues_a_comment = len(comment) > 0 return '\n'.join(out).rstrip()+'\n'
[ "Standardize an input TeX-file contents.\n\n Currently:\n * removes comments, unwrapping comment-wrapped lines.\n " ]
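An illustration of the intended unwrapping behavior; `comment_re` itself lives on the scanner class and is not shown here, so only the expected input and output are sketched.

tex = "\\includegraphics{fig% comment wraps the file name\nure1}\n"
# canonical_text(tex) is expected to return:
#   "\\includegraphics{figure1}\n"
# i.e. the comment is stripped and the continuation line is joined back on.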
Please provide a description of the function:def scan_recurse(self, node, path=()):
    path_dict = dict(list(path))

    queue = []
    queue.extend( self.scan(node) )
    seen = {}

    # This is a hand-coded DSU (decorate-sort-undecorate, or
    # Schwartzian transform) pattern.  The sort key is the raw name
    # of the file as specified on the \include, \input, etc. line.
    # TODO: what about the comment in the original Classic scanner:
    nodes = []
    source_dir = node.get_dir()
    #for include in includes:
    while queue:
        include = queue.pop()
        inc_type, inc_subdir, inc_filename = include

        try:
            if seen[inc_filename] == 1:
                continue
        except KeyError:
            seen[inc_filename] = 1

        #
        # Handle multiple filenames in include[1]
        #
        n, i = self.find_include(include, source_dir, path_dict)
        if n is None:
            # Do not bother with 'usepackage' warnings, as they most
            # likely refer to system-level files
            if inc_type != 'usepackage':
                SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                    "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
        else:
            sortkey = self.sort_key(n)
            nodes.append((sortkey, n))
            # recurse down
            queue.extend( self.scan(n, inc_subdir) )

    return [pair[1] for pair in sorted(nodes)]
[ " do a recursive scan of the top level target file\n This lets us search for included files based on the\n directory of the main file just as latex does", "which lets\n # us keep the sort order constant regardless of whether the file\n # is actually found in a Repository or locally." ]
Please provide a description of the function:def caller_trace(back=0): global caller_bases, caller_dicts import traceback tb = traceback.extract_stack(limit=3+back) tb.reverse() callee = tb[1][:3] caller_bases[callee] = caller_bases.get(callee, 0) + 1 for caller in tb[2:]: caller = callee + caller[:3] try: entry = caller_dicts[callee] except KeyError: caller_dicts[callee] = entry = {} entry[caller] = entry.get(caller, 0) + 1 callee = caller
[ "\n Trace caller stack and save info into global dicts, which\n are printed automatically at the end of SCons execution.\n " ]
Please provide a description of the function:def Trace(msg, file=None, mode='w', tstamp=None): global TraceDefault global TimeStampDefault global PreviousTime if file is None: file = TraceDefault else: TraceDefault = file if tstamp is None: tstamp = TimeStampDefault else: TimeStampDefault = tstamp try: fp = TraceFP[file] except KeyError: try: fp = TraceFP[file] = open(file, mode) except TypeError: # Assume we were passed an open file pointer. fp = file if tstamp: now = time.time() fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime)) PreviousTime = now fp.write(msg) fp.flush() fp.close()
[ "Write a trace message to a file. Whenever a file is specified,\n it becomes the default for the next call to Trace()." ]
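A minimal usage sketch; the file path is arbitrary and the call assumes Trace has been imported from its debug module.

Trace("entering builder\n", file="/tmp/scons-trace.log", tstamp=True)
# Later calls such as Trace("still running\n") pick up the same file and
# timestamp setting, because both become the new defaults.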
Please provide a description of the function:def verify(self, obj):
    if isinstance(obj, str):
        raise ValidationError("Object was not a list",
                              reason="a string was passed instead of a list",
                              object=obj)

    out_obj = []

    if self._min_length is not None and len(obj) < self._min_length:
        raise ValidationError("List was too short",
                              reason="list length %d was less than the minimum %d" % (len(obj), self._min_length),
                              min_length=self._min_length, actual_length=len(obj))

    if self._max_length is not None and len(obj) > self._max_length:
        raise ValidationError("List was too long",
                              reason="list length %d was greater than the max %d" % (len(obj), self._max_length),
                              max_length=self._max_length, actual_length=len(obj))

    for val in obj:
        out_obj.append(self._verifier.verify(val))

    return out_obj
[ "Verify that the object conforms to this verifier's schema\n\n Args:\n obj (object): A python object to verify\n\n Raises:\n ValidationError: If there is a problem verifying the dictionary, a\n ValidationError is thrown with at least the reason key set indicating\n the reason for the lack of validation.\n " ]
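A hypothetical wiring of the verifier; the class names `ListVerifier` and `IntVerifier` and the constructor signature are assumptions inferred from the attributes used above.

ints = ListVerifier(IntVerifier(), min_length=1, max_length=3)
clean = ints.verify([1, 2, 3])    # returns a new list of verified values
ints.verify("123")                # raises ValidationError: not a list
ints.verify([1, 2, 3, 4])         # raises ValidationError: list too long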
Please provide a description of the function:def hex2bin(fin, fout, start=None, end=None, size=None, pad=None): try: h = IntelHex(fin) except HexReaderError: e = sys.exc_info()[1] # current exception txt = "ERROR: bad HEX file: %s" % str(e) print(txt) return 1 # start, end, size if size != None and size != 0: if end == None: if start == None: start = h.minaddr() end = start + size - 1 else: if (end+1) >= size: start = end + 1 - size else: start = 0 try: if pad is not None: # using .padding attribute rather than pad argument to function call h.padding = pad h.tobinfile(fout, start, end) except IOError: e = sys.exc_info()[1] # current exception txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e)) print(txt) return 1 return 0
[ "Hex-to-Bin convertor engine.\n @return 0 if all OK\n\n @param fin input hex file (filename or file-like object)\n @param fout output bin file (filename or file-like object)\n @param start start of address range (optional)\n @param end end of address range (inclusive; optional)\n @param size size of resulting file (in bytes) (optional)\n @param pad padding byte (optional)\n " ]
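A sketch of how the range arguments resolve, using the import path the intelhex package normally exposes (treat the exact path as an assumption).

from intelhex import hex2bin

# size given, end omitted: the range grows forward from start, so this is
# equivalent to start=0x0800, end=0x17FF; gaps are filled with the pad byte.
hex2bin("firmware.hex", "firmware.bin", start=0x0800, size=0x1000, pad=0xFF)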
Please provide a description of the function:def bin2hex(fin, fout, offset=0):
    h = IntelHex()
    try:
        h.loadbin(fin, offset)
    except IOError:
        e = sys.exc_info()[1]     # current exception
        txt = "ERROR: unable to load bin file: %s" % str(e)
        print(txt)
        return 1

    try:
        h.tofile(fout, format='hex')
    except IOError:
        e = sys.exc_info()[1]     # current exception
        txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
        print(txt)
        return 1

    return 0
[ "Simple bin-to-hex convertor.\n @return 0 if all OK\n\n @param fin input bin file (filename or file-like object)\n @param fout output hex file (filename or file-like object)\n @param offset starting address offset for loading bin\n " ]
Please provide a description of the function:def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3): def prepare_lines(ih): sio = StringIO() ih.dump(sio) dump = sio.getvalue() lines = dump.splitlines() return lines a = prepare_lines(ih1) b = prepare_lines(ih2) import difflib result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm='')) if tofile is None: tofile = sys.stdout output = '\n'.join(result)+'\n' tofile.write(output)
[ "Diff 2 IntelHex objects and produce unified diff output for their\n hex dumps.\n\n @param ih1 first IntelHex object to compare\n @param ih2 second IntelHex object to compare\n @param tofile file-like object to write output\n @param name1 name of the first hex file to show in the diff header\n @param name2 name of the second hex file to show in the diff header\n @param n_context number of context lines in the unidiff output\n " ]
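A usage sketch; `diff_dumps` is assumed to be importable alongside IntelHex, and output goes to stdout when tofile is omitted.

from intelhex import IntelHex, diff_dumps

ih_old = IntelHex("v1.hex")
ih_new = IntelHex("v2.hex")
diff_dumps(ih_old, ih_new, name1="v1.hex", name2="v2.hex", n_context=3)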
Please provide a description of the function:def _get_file_and_addr_range(s, _support_drive_letter=None): if _support_drive_letter is None: _support_drive_letter = (os.name == 'nt') drive = '' if _support_drive_letter: if s[1:2] == ':' and s[0].upper() in ''.join([chr(i) for i in range_g(ord('A'), ord('Z')+1)]): drive = s[:2] s = s[2:] parts = s.split(':') n = len(parts) if n == 1: fname = parts[0] fstart = None fend = None elif n != 3: raise _BadFileNotation else: fname = parts[0] def ascii_hex_to_int(ascii): if ascii is not None: try: return int(ascii, 16) except ValueError: raise _BadFileNotation return ascii fstart = ascii_hex_to_int(parts[1] or None) fend = ascii_hex_to_int(parts[2] or None) return drive+fname, fstart, fend
[ "Special method for hexmerge.py script to split file notation\n into 3 parts: (filename, start, end)\n\n @raise _BadFileNotation when string cannot be safely split.\n " ]
Please provide a description of the function:def _decode_record(self, s, line=0): '''Decode one record of HEX file. @param s line with HEX record. @param line line number (for error messages). @raise EndOfFile if EOF record encountered. ''' s = s.rstrip('\r\n') if not s: return # empty line if s[0] == ':': try: bin = array('B', unhexlify(asbytes(s[1:]))) except (TypeError, ValueError): # this might be raised by unhexlify when odd hexascii digits raise HexRecordError(line=line) length = len(bin) if length < 5: raise HexRecordError(line=line) else: raise HexRecordError(line=line) record_length = bin[0] if length != (5 + record_length): raise RecordLengthError(line=line) addr = bin[1]*256 + bin[2] record_type = bin[3] if not (0 <= record_type <= 5): raise RecordTypeError(line=line) crc = sum(bin) crc &= 0x0FF if crc != 0: raise RecordChecksumError(line=line) if record_type == 0: # data record addr += self._offset for i in range_g(4, 4+record_length): if not self._buf.get(addr, None) is None: raise AddressOverlapError(address=addr, line=line) self._buf[addr] = bin[i] addr += 1 # FIXME: addr should be wrapped # BUT after 02 record (at 64K boundary) # and after 04 record (at 4G boundary) elif record_type == 1: # end of file record if record_length != 0: raise EOFRecordError(line=line) raise _EndOfFile elif record_type == 2: # Extended 8086 Segment Record if record_length != 2 or addr != 0: raise ExtendedSegmentAddressRecordError(line=line) self._offset = (bin[4]*256 + bin[5]) * 16 elif record_type == 4: # Extended Linear Address Record if record_length != 2 or addr != 0: raise ExtendedLinearAddressRecordError(line=line) self._offset = (bin[4]*256 + bin[5]) * 65536 elif record_type == 3: # Start Segment Address Record if record_length != 4 or addr != 0: raise StartSegmentAddressRecordError(line=line) if self.start_addr: raise DuplicateStartAddressRecordError(line=line) self.start_addr = {'CS': bin[4]*256 + bin[5], 'IP': bin[6]*256 + bin[7], } elif record_type == 5: # Start Linear Address Record if record_length != 4 or addr != 0: raise StartLinearAddressRecordError(line=line) if self.start_addr: raise DuplicateStartAddressRecordError(line=line) self.start_addr = {'EIP': (bin[4]*16777216 + bin[5]*65536 + bin[6]*256 + bin[7]), }
[]
Please provide a description of the function:def loadhex(self, fobj): if getattr(fobj, "read", None) is None: fobj = open(fobj, "r") fclose = fobj.close else: fclose = None self._offset = 0 line = 0 try: decode = self._decode_record try: for s in fobj: line += 1 decode(s, line) except _EndOfFile: pass finally: if fclose: fclose()
[ "Load hex file into internal buffer. This is not necessary\n if object was initialized with source set. This will overwrite\n addresses if object was already initialized.\n\n @param fobj file name or file-like object\n " ]
Please provide a description of the function:def loadbin(self, fobj, offset=0): fread = getattr(fobj, "read", None) if fread is None: f = open(fobj, "rb") fread = f.read fclose = f.close else: fclose = None try: self.frombytes(array('B', asbytes(fread())), offset=offset) finally: if fclose: fclose()
[ "Load bin file into internal buffer. Not needed if source set in\n constructor. This will overwrite addresses without warning\n if object was already initialized.\n\n @param fobj file name or file-like object\n @param offset starting address offset\n " ]
Please provide a description of the function:def loadfile(self, fobj, format): if format == "hex": self.loadhex(fobj) elif format == "bin": self.loadbin(fobj) else: raise ValueError('format should be either "hex" or "bin";' ' got %r instead' % format)
[ "Load data file into internal buffer. Preferred wrapper over\n loadbin or loadhex.\n\n @param fobj file name or file-like object\n @param format file format (\"hex\" or \"bin\")\n " ]
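A short sketch of the preferred wrapper in use; file names are arbitrary.

ih = IntelHex()
ih.loadfile("app.hex", format="hex")     # delegates to loadhex()

boot = IntelHex()
boot.loadfile("boot.bin", format="bin")  # delegates to loadbin(), offset 0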