docstring | function | __index_level_0__
---|---|---|
Upgrade or degrade resolution of a pixel list.
Parameters:
-----------
ipix:array-like
the input pixel(s)
nside_in:int
the nside of the input pixel(s)
nside_out:int
the desired nside of the output pixel(s)
nest:bool
use NESTED pixel ordering if True, RING if False (default: False)
Returns:
--------
pix_out:array-like
the upgraded or degraded pixel array
|
def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):
if nside_in == nside_out: return ipix
elif nside_in < nside_out:
return u_grade_ipix(ipix, nside_in, nside_out, nest)
elif nside_in > nside_out:
return d_grade_ipix(ipix, nside_in, nside_out, nest)
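# Hedged usage sketch (not part of the original source); assumes healpy-style
# pixel numbering and the module's own u_grade_ipix/d_grade_ipix helpers:
# >>> ud_grade_ipix([0, 1, 2, 3], nside_in=2, nside_out=4)   # upgrade: each pixel maps to 4 children
# >>> ud_grade_ipix([0, 1, 2, 3], nside_in=4, nside_out=2)   # degrade: each pixel maps to its parent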
| 848,389 |
Check if (lon,lat) is in a pixel list. Assumes RING formatting.
Parameters:
-----------
lon : longitude (deg)
lat : latitude (deg)
pixels : pixel list [RING format] to check for inclusion
nside : nside of pixel list
Returns:
--------
inpix : boolean array for inclusion
|
def in_pixels(lon,lat,pixels,nside):
pix = ang2pix(nside,lon,lat)
return np.in1d(pix,pixels)
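# Hedged usage sketch (illustrative only); assumes lon/lat in degrees and a
# RING-ordered pixel list, per the docstring:
# >>> pixels = np.arange(100)                      # hypothetical pixel list
# >>> in_pixels([10.0, 200.0], [45.0, -45.0], pixels, nside=64)
# # -> boolean array; values depend on the actual pixel list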
| 848,395 |
Find the indices of a set of pixels into another set of pixels.
!!! ASSUMES SORTED PIXELS !!!
Parameters:
-----------
pix : set of search pixels
pixels : set of reference pixels
Returns:
--------
index : index into the reference pixels
|
def index_pix_in_pixels(pix,pixels,sort=False,outside=-1):
# ADW: Not really safe to set index = -1 (accesses last entry);
# -np.inf would be better, but breaks other code...
# ADW: Are the pixels always sorted? Is there a quick way to check?
if sort: pixels = np.sort(pixels)
# Assumes that 'pixels' is pre-sorted unless sort=True is passed
index = np.searchsorted(pixels,pix)
if np.isscalar(index):
if not np.in1d(pix,pixels).any(): index = outside
else:
# Find objects that are outside the pixels
index[~np.in1d(pix,pixels)] = outside
return index
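# Hedged worked example (not from the original source): with sorted reference
# pixels, np.searchsorted returns the match index for hits, and misses are
# overwritten with `outside`:
# >>> pixels = np.array([2, 5, 7, 11])
# >>> index_pix_in_pixels(np.array([5, 11, 3]), pixels)
# array([ 1,  3, -1])   # 3 is not in `pixels`, so it maps to outside=-1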
| 848,396 |
Find the indices of a set of angles into a set of pixels
Parameters:
-----------
lon : longitude (deg)
lat : latitude (deg)
pixels : set of reference pixels
nside : nside of the reference pixels
Returns:
--------
index : index into the reference pixels
|
def index_lonlat_in_pixels(lon,lat,pixels,nside,sort=False,outside=-1):
pix = ang2pix(nside,lon,lat)
return index_pix_in_pixels(pix,pixels,sort,outside)
| 848,397 |
Read a partial HEALPix file(s) and return pixels and values/map. Can
handle 3D healpix maps (pix, value, zdim). Returned array has
shape (dimz,npix).
Parameters:
-----------
filenames : list of input filenames
column : column of interest
fullsky : if True, return a full-sky map; otherwise return only the partial-map values
kwargs : passed to fitsio.read
Returns:
--------
(nside,pix,map) : pixel array and healpix map (partial or fullsky)
|
def read_partial_map(filenames, column, fullsky=True, **kwargs):
# Make sure that PIXEL is in columns
kwargs['columns'] = ['PIXEL'] + np.atleast_1d(column).tolist()
filenames = np.atleast_1d(filenames)
header = fitsio.read_header(filenames[0],ext=kwargs.get('ext',1))
data = ugali.utils.fileio.load_files(filenames,**kwargs)
pix = data['PIXEL']
value = data[column]
nside = header['NSIDE']
npix = hp.nside2npix(nside)
ndupes = len(pix) - len(np.unique(pix))
if ndupes > 0:
msg = '%i duplicate pixels during load.'%(ndupes)
raise Exception(msg)
if fullsky and not np.isscalar(column):
raise Exception("Cannot make fullsky map from list of columns.")
if fullsky:
shape = list(value.shape)
shape[0] = npix
hpxmap = hp.UNSEEN * np.ones(shape,dtype=value.dtype)
hpxmap[pix] = value
return (nside,pix,hpxmap.T)
else:
return (nside,pix,value.T)
| 848,402 |
Merge header information from likelihood files.
Parameters:
-----------
filenames : input filenames
outfile : the merged file to write
Returns:
--------
data : the data being written
|
def merge_likelihood_headers(filenames, outfile):
filenames = np.atleast_1d(filenames)
ext='PIX_DATA'
nside = fitsio.read_header(filenames[0],ext=ext)['LKDNSIDE']
keys=['STELLAR','NINSIDE','NANNULUS']
data_dict = odict(PIXEL=[])
for k in keys:
data_dict[k] = []
for i,filename in enumerate(filenames):
logger.debug('(%i/%i) %s'%(i+1, len(filenames), filename))
header = fitsio.read_header(filename,ext=ext)
data_dict['PIXEL'].append(header['LKDPIX'])
for key in keys:
data_dict[key].append(header[key])
del header
data_dict['PIXEL'] = np.array(data_dict['PIXEL'],dtype=int)
for key in keys:
data_dict[key] = np.array(data_dict[key],dtype='f4')
write_partial_map(outfile, data_dict, nside)
return data_dict
| 848,404 |
Extends the reservation on this cart by the given timedelta.
This can only be done if the current state of the cart is valid (i.e.
all items and discounts in the cart are still available).
Arguments:
timedelta (timedelta): The amount of time to extend the cart by.
The resulting reservation_duration will be now() + timedelta,
unless the requested extension is *LESS* than the current
reservation deadline.
|
def extend_reservation(self, timedelta):
self.validate_cart()
cart = self.cart
cart.refresh_from_db()
elapsed = (timezone.now() - cart.time_last_updated)
if cart.reservation_duration - elapsed > timedelta:
return
cart.time_last_updated = timezone.now()
cart.reservation_duration = timedelta
cart.save()
| 848,411 |
Decorator that converts a report view function into something that
displays a Report.
Arguments:
title (str):
The title of the report.
form_type (Optional[forms.Form]):
A form class that can make this report display things. If not
supplied, no form will be displayed.
|
def report_view(title, form_type=None):
# Create & return view
def _report(view):
report_view = ReportView(view, title, form_type)
report_view = user_passes_test(views._staff_only)(report_view)
report_view = wraps(view)(report_view)
# Add this report to the list of reports.
_all_report_views.append(report_view)
return report_view
return _report
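# Hedged usage sketch (hypothetical view function; not from the original source):
# @report_view("Attendee summary")
# def attendee_summary(request, form):
#     ...  # return whatever ReportView knows how to render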
| 848,452 |
Renders the reports based on data.content_type's value.
Arguments:
data (ReportViewRequestData): The report data. data.content_type
is used to determine how the reports are rendered.
Returns:
HttpResponse: The rendered version of the report.
|
def render(self, data):
renderers = {
"text/csv": self._render_as_csv,
"text/html": self._render_as_html,
None: self._render_as_html,
}
render = renderers[data.content_type]
return render(data)
| 848,468 |
Marks an invoice as refunded and requests a credit note for the
full amount paid against the invoice.
This view requires a login, and the logged in user must be staff.
Arguments:
invoice_id (castable to int): The ID of the invoice to refund.
Returns:
redirect:
Redirects to ``invoice``.
|
def refund(request, invoice_id):
current_invoice = InvoiceController.for_id_or_404(invoice_id)
try:
current_invoice.refund()
messages.success(request, "This invoice has been refunded.")
except ValidationError as ve:
messages.error(request, ve)
return redirect("invoice", invoice_id)
| 848,864 |
Server query for the isochrone file.
Parameters:
-----------
outfile : name of output isochrone file
age : isochrone age
metallicity : isochrone metallicity
Returns:
--------
outfile : name of output isochrone file
|
def query_server(self,outfile,age,metallicity):
params = copy.deepcopy(self.download_defaults)
epsilon = 1e-4
lage = np.log10(age*1e9)
lage_min,lage_max = params['isoc_lage0'],params['isoc_lage1']
if not (lage_min-epsilon < lage < lage_max+epsilon):
msg = 'Age outside of valid range: %g [%g < log(age) < %g]'%(lage,lage_min,lage_max)
raise RuntimeError(msg)
z_min,z_max = params['isoc_z0'],params['isoc_z1']
if not (z_min <= metallicity <= z_max):
msg = 'Metallicity outside of valid range: %g [%g < z < %g]'%(metallicity,z_min,z_max)
raise RuntimeError(msg)
params['photsys_file'] = photsys_dict[self.survey]
params['isoc_age'] = age * 1e9
params['isoc_zeta'] = metallicity
server = self.download_url
url = server + '/cgi-bin/cmd_%s'%params['cmd_version']
# First check that the server is alive
logger.debug("Accessing %s..."%url)
urlopen(url,timeout=2)
q = urlencode(params).encode('utf-8')
logger.debug(url+'?'+q)
c = str(urlopen(url, q).read())
aa = re.compile(r'output\d+')
fname = aa.findall(c)
if len(fname) == 0:
msg = "Output filename not found"
raise RuntimeError(msg)
out = '{0}/tmp/{1}.dat'.format(server, fname[0])
cmd = 'wget --progress dot:binary %s -O %s'%(out,outfile)
logger.debug(cmd)
stdout = subprocess.check_output(cmd,shell=True,stderr=subprocess.STDOUT)
logger.debug(stdout)
return outfile
| 849,134 |
Object to efficiently search over a grid of ROI positions.
Parameters:
-----------
config : Configuration object or filename.
loglike : Log-likelihood object
Returns:
--------
grid : GridSearch instance
|
def __init__(self, config, loglike):
self.config = Config(config)
self.loglike = loglike
self.roi = self.loglike.roi
self.mask = self.loglike.mask
logger.info(str(self.loglike))
self.stellar_mass_conversion = self.loglike.source.stellar_mass()
self.distance_modulus_array = np.asarray(self.config['scan']['distance_modulus_array'])
| 849,139 |
DEPRECATED: ADW 20170627
Precompute color probabilities for background ('u_background')
and signal ('u_color') for each star in catalog. Precompute
observable fraction in each ROI pixel. # Precompute still
operates over the full ROI, not just the likelihood region
Parameters:
-----------
distance_modulus_array : Array of distance moduli
Returns:
--------
None
|
def precompute(self, distance_modulus_array=None):
msg = "'%s.precompute': ADW 2017-09-20"%self.__class__.__name__
warnings.warn(msg, DeprecationWarning)  # assumes `import warnings` at module level
if distance_modulus_array is not None:
self.distance_modulus_array = distance_modulus_array
else:
self.distance_modulus_array = np.asarray(self.config['scan']['distance_modulus_array'])
# Observable fraction for each pixel
self.u_color_array = [[]] * len(self.distance_modulus_array)
self.observable_fraction_sparse_array = [[]] * len(self.distance_modulus_array)
logger.info('Looping over distance moduli in precompute ...')
for ii, distance_modulus in enumerate(self.distance_modulus_array):
logger.info(' (%i/%i) Distance Modulus = %.2f ...'%(ii+1, len(self.distance_modulus_array), distance_modulus))
self.u_color_array[ii] = False
if self.config['scan']['color_lut_infile'] is not None:
warnings.warn("'color_lut' is deprecated", DeprecationWarning)
logger.info(' Precomputing signal color from %s'%(self.config['scan']['color_lut_infile']))
self.u_color_array[ii] = ugali.analysis.color_lut.readColorLUT(self.config['scan']['color_lut_infile'],
distance_modulus,
self.loglike.catalog.mag_1,
self.loglike.catalog.mag_2,
self.loglike.catalog.mag_err_1,
self.loglike.catalog.mag_err_2)
if not np.any(self.u_color_array[ii]):
logger.info(' Precomputing signal color on the fly...')
self.u_color_array[ii] = self.loglike.calc_signal_color(distance_modulus)
# Calculate over all pixels in ROI
self.observable_fraction_sparse_array[ii] = self.loglike.calc_observable_fraction(distance_modulus)
self.u_color_array = np.array(self.u_color_array)
| 849,140 |
Identify peak using Gaussian kernel density estimator.
Parameters:
-----------
data : The 1d data sample
npoints : The number of kde points to evaluate
Returns:
--------
(peak, density) : the peak position and the KDE density at the peak
|
def kde(data, npoints=_npoints):
# Clipping of severe outliers to concentrate more KDE samples in the parameter range of interest
mad = np.median(np.fabs(np.median(data) - data))
cut = (data > np.median(data) - 5. * mad) & (data < np.median(data) + 5. * mad)
x = data[cut]
kde = scipy.stats.gaussian_kde(x)
# No penalty for using a finer sampling for KDE evaluation except computation time
values = np.linspace(np.min(x), np.max(x), npoints)
kde_values = kde.evaluate(values)
peak = values[np.argmax(kde_values)]
return peak, kde.evaluate(peak)
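# Hedged usage sketch (illustrative; assumes a roughly unimodal sample):
# >>> sample = np.random.normal(loc=2.0, scale=0.5, size=1000)
# >>> peak, density = kde(sample, npoints=1000)
# # peak should land near 2.0; density is the KDE value at the peak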
| 849,184 |
Produces a callable so that functions can be lazily evaluated in
templates.
Arguments:
function (callable): The function to call at evaluation time.
args: Positional arguments, passed directly to ``function``.
kwargs: Keyword arguments, passed directly to ``function``.
Return:
callable: A callable that will evaluate a call to ``function`` with
the specified arguments.
|
def lazy(function, *args, **kwargs):
NOT_EVALUATED = object()
retval = [NOT_EVALUATED]
def evaluate():
if retval[0] is NOT_EVALUATED:
retval[0] = function(*args, **kwargs)
return retval[0]
return evaluate
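# Hedged usage sketch (not from the original source): the wrapped call runs at
# most once, on the first invocation of the returned callable.
# >>> total = lazy(sum, range(10**6))
# >>> total()   # computes and caches the result
# >>> total()   # returns the cached value without recomputing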
| 849,310 |
Returns the named object.
Arguments:
name (str): A string of form `package.subpackage.etc.module.property`.
This function will import `package.subpackage.etc.module` and
return `property` from that module.
|
def get_object_from_name(name):
dot = name.rindex(".")
mod_name, property_name = name[:dot], name[dot + 1:]
__import__(mod_name)
return getattr(sys.modules[mod_name], property_name)
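# Hedged usage sketch (illustrative only):
# >>> get_object_from_name("os.path.join")   # imports os.path, returns its join function
# <function join at 0x...>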
| 849,311 |
Break catalog into chunks by healpix pixel.
Parameters:
-----------
infiles : List of input files
config : Configuration file
force : Overwrite existing files (deprecated)
Returns:
--------
None
|
def pixelizeCatalog(infiles, config, force=False):
nside_catalog = config['coords']['nside_catalog']
nside_pixel = config['coords']['nside_pixel']
coordsys = config['coords']['coordsys'].upper()
outdir = mkdir(config['catalog']['dirname'])
filenames = config.getFilenames()
lon_field = config['catalog']['lon_field'].upper()
lat_field = config['catalog']['lat_field'].upper()
# ADW: It would probably be better (and more efficient) to do the
# pixelizing and the new column insertion separately.
for i,filename in enumerate(infiles):
logger.info('(%i/%i) %s'%(i+1, len(infiles), filename))
data = fitsio.read(filename)
logger.info("%i objects found"%len(data))
if not len(data): continue
columns = list(map(str.upper,data.dtype.names))  # list, since membership is tested more than once below
names,arrs = [],[]
if (lon_field in columns) and (lat_field in columns):
lon,lat = data[lon_field],data[lat_field]
elif coordsys == 'GAL':
msg = "Columns '%s' and '%s' not found."%(lon_field,lat_field)
msg += "\nConverting from RA,DEC"
logger.warning(msg)
lon,lat = cel2gal(data['RA'],data['DEC'])
names += [lon_field,lat_field]
arrs += [lon,lat]
elif coordsys == 'CEL':
msg = "Columns '%s' and '%s' not found."%(lon_field,lat_field)
msg += "\nConverting from GLON,GLAT"
lon,lat = gal2cel(data['GLON'],data['GLAT'])
names += [lon_field,lat_field]
arrs += [lon,lat]
cat_pix = ang2pix(nside_catalog,lon,lat)
pix_pix = ang2pix(nside_pixel,lon,lat)
cat_pix_name = 'PIX%i'%nside_catalog
pix_pix_name = 'PIX%i'%nside_pixel
try:
names += [cat_pix_name,pix_pix_name]
arrs += [cat_pix,pix_pix]
data=mlab.rec_append_fields(data,names=names,arrs=arrs)
except ValueError as e:
logger.warning(str(e)+'; not adding column.')
#data[cat_pix_name] = cat_pix
#data[pix_pix_name] = pix_pix
for pix in np.unique(cat_pix):
logger.debug("Processing pixel %s"%pix)
arr = data[cat_pix == pix]
outfile = filenames.data['catalog'][pix]
if not os.path.exists(outfile):
logger.debug("Creating %s"%outfile)
out=fitsio.FITS(outfile,mode='rw')
out.write(arr)
hdr=healpix.header_odict(nside=nside_catalog,
coord=coordsys[0])
for key in ['PIXTYPE','ORDERING','NSIDE','COORDSYS']:
out[1].write_key(*list(hdr[key].values()))
out[1].write_key('PIX',pix,comment='HEALPIX pixel for this file')
else:
out=fitsio.FITS(outfile,mode='rw')
out[1].append(arr)
logger.debug("Writing %s"%outfile)
out.close()
| 849,364 |
Get flow direction induced cell length dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
|
def get_cell_length(flow_model):
assert flow_model.lower() in FlowModelConst.d8_lens
return FlowModelConst.d8_lens.get(flow_model.lower())
| 849,431 |
Get flow direction induced cell shift dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
|
def get_cell_shift(flow_model):
assert flow_model.lower() in FlowModelConst.d8_deltas
return FlowModelConst.d8_deltas.get(flow_model.lower())
| 849,432 |
convert D8 flow direction code from one algorithm to another.
Args:
in_file: input raster file path
out_file: output raster file path
in_alg: available algorithms are in FlowModelConst.d8_dirs. "taudem" is the default
out_alg: same as in_alg. "arcgis" is the default
datatype: default is None and use the datatype of the in_file
|
def convert_code(in_file, out_file, in_alg='taudem', out_alg='arcgis', datatype=None):
FileClass.check_file_exists(in_file)
in_alg = in_alg.lower()
out_alg = out_alg.lower()
if in_alg not in FlowModelConst.d8_dirs or out_alg not in FlowModelConst.d8_dirs:
raise RuntimeError('The input algorithm name should be one of %s' %
', '.join(list(FlowModelConst.d8_dirs.keys())))
convert_dict = dict()
in_code = FlowModelConst.d8_dirs.get(in_alg)
out_code = FlowModelConst.d8_dirs.get(out_alg)
assert len(in_code) == len(out_code)
for i, tmp_in_code in enumerate(in_code):
convert_dict[tmp_in_code] = out_code[i]
if datatype is not None and datatype in GDALDataType:
RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file, datatype)
else:
RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file)
| 849,434 |
checks if name has been changed and ignores the name change if the changed_item is an existing script
Args:
changed_item:
|
def name_changed(self, changed_item):
name = str(changed_item.text())
# if the item has been moved we ignore this because the item only went from one tree to the other without changing names
if name != '':
if name != self.selected_element_name:
self.elements_from_file[name] = self.elements_from_file[self.selected_element_name]
del self.elements_from_file[self.selected_element_name]
self.selected_element_name = name
| 849,454 |
fills a tree with nested parameters
Args:
tree: QtGui.QTreeView
input_dict: dictionary or Parameter object
Returns:
|
def fill_tree(self, tree, input_dict):
def add_element(item, key, value):
child_name = QtGui.QStandardItem(key)
child_name.setDragEnabled(False)
child_name.setSelectable(False)
child_name.setEditable(False)
if isinstance(value, dict):
for key_child, value_child in value.items():
add_element(child_name, key_child, value_child)
child_value = QtGui.QStandardItem('')
else:
child_value = QtGui.QStandardItem(str(value))
child_value.setData(value)
child_value.setDragEnabled(False)
child_value.setSelectable(False)
child_value.setEditable(False)
item.appendRow([child_name, child_value])
for index, (loaded_item, loaded_item_settings) in enumerate(input_dict.items()):
item = QtGui.QStandardItem(loaded_item)
for key, value in loaded_item_settings['settings'].items():
add_element(item, key, value)
value = QtGui.QStandardItem('')
tree.model().appendRow([item, value])
if tree == self.tree_loaded:
item.setEditable(False)
tree.setFirstColumnSpanned(index, self.tree_infile.rootIndex(), True)
| 849,458 |
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
|
def ped_parser(self, family_info):
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
| 849,470 |
Parse alternative formatted family info
This parses information with more than six columns.
For alternative information a header column must exist and each row
must have the same number of columns as the header.
First six columns must be the same as in the ped format.
Arguments:
family_info (iterator): An iterator with family info
|
def alternative_parser(self, family_file):
alternative_header = None
for line in family_file:
if line.startswith('#'):
alternative_header = line[1:].rstrip().split('\t')
self.logger.info("Alternative header found: {0}".format(line))
elif line.strip():
if not alternative_header:
raise WrongLineFormat(message="Alternative ped files must have "\
"headers! Please add a header line.")
splitted_line = line.rstrip().split('\t')
if len(splitted_line) < 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, len(alternative_header))
except SyntaxError as e:
self.logger.error('Number of entries differs from header.')
self.logger.error("Header:\n{0}".format('\t'.join(alternative_header)))
self.logger.error("Ped Line:\n{0}".format('\t'.join(splitted_line)))
self.logger.error("Length of Header: {0}. Length of "\
"Ped line: {1}".format(
len(alternative_header),
len(splitted_line))
)
raise e
if len(line) > 1:
sample_dict = dict(zip(self.header, splitted_line[:6]))
family_id = sample_dict['family_id']
all_info = dict(zip(alternative_header, splitted_line))
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
sample_dict['genetic_models'] = all_info.get('InheritanceModel', None)
# Try other header naming:
if not sample_dict['genetic_models']:
sample_dict['genetic_models'] = all_info.get('Inheritance_model', None)
sample_dict['proband'] = all_info.get('Proband', '.')
sample_dict['consultand'] = all_info.get('Consultand', '.')
sample_dict['alive'] = all_info.get('Alive', '.')
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object)
if sample_dict['genetic_models']:
for model in self.get_models(sample_dict['genetic_models']):
self.families[ind_object.family].models_of_inheritance.add(model)
# If requested, we check if it is an id in the CMMS format:
sample_id_parts = ind_object.individual_id.split('-')
if self.cmms_check and (len(sample_id_parts) == 3):
# If the id follow the CMMS convention we can
# do a sanity check
if self.check_cmms_id(ind_object.individual_id):
self.logger.debug("Id follows CMMS convention: {0}".format(
ind_object.individual_id
))
self.logger.debug("Checking CMMS id affections status")
try:
self.check_cmms_affection_status(ind_object)
except WrongAffectionStatus as e:
self.logger.error("Wrong affection status for"\
" {0}. Affection status can be in"\
" {1}".format(e.cmms_id, e.valid_statuses))
raise e
except WrongPhenotype as e:
self.logger.error("Affection status for {0} "\
"({1}) disagrees with phenotype ({2})".format(
e.cmms_id, e.phenotype, e.affection_status
))
raise e
try:
self.check_cmms_gender(ind_object)
except WrongGender as e:
self.logger.error("Gender code for id {0}"\
"({1}) disagrees with sex:{2}".format(
e.cmms_id, e.sex_code, e.sex
))
raise e
for i in range(6, len(splitted_line)):
ind_object.extra_info[alternative_header[i]] = splitted_line[i]
| 849,471 |
Check if the affection status is correct.
Args:
ind_object : An Individuals object
Returns:
bool : True if the affection status is correct (raises otherwise)
|
def check_cmms_affection_status(self, ind_object):
valid_affection_statuses = ['A', 'U', 'X']
ind_id = ind_object.individual_id.split('-')
phenotype = ind_object.phenotype
affection_status = ind_id[-1][-1]
if affection_status not in valid_affection_statuses:
raise WrongAffectionStatus(ind_object.individual_id,
valid_affection_statuses)
if (affection_status == 'A' and phenotype != 2 or
affection_status == 'U' and phenotype != 1):
raise WrongPhenotype(ind_object.individual_id, phenotype,
affection_status)
return True
| 849,473 |
Check if the gender is correct.
Args:
ind_object : An Individuals object
Returns:
bool : True if the gender code agrees with the sex (raises otherwise)
|
def check_cmms_gender(self, ind_object):
ind_id = ind_object.individual_id.split('-')
sex = ind_object.sex
sex_code = int(ind_id[-1][:-1])  # males always have odd numbers and women even
if (sex_code % 2 == 0 and sex != 2) or (sex_code % 2 != 0 and sex != 1):
raise WrongGender(ind_object.individual_id, sex, sex_code)
return True
| 849,474 |
Check what genetic models that are found and return them as a set.
Args:
genetic_models : A string with genetic models
Returns:
correct_model_names : A set with the correct model names
|
def get_models(self, genetic_models):
correct_model_names = set()
genetic_models = genetic_models.split(';')
for model in genetic_models:
# We need to allow typos
if model in self.legal_ar_hom_names:
model = 'AR_hom'
elif model in self.legal_ar_hom_dn_names:
model = 'AR_hom_dn'
elif model in self.legal_ad_names:
model = 'AD_dn'
elif model in self.legal_compound_names:
model = 'AR_comp'
elif model in self.legal_x_names:
model = 'X'
elif model in self.legal_na_names:
model = 'NA'
else:
self.logger.warning("Incorrect model name: {0}."\
" Ignoring model.".format(model))
correct_model_names.add(model)
return correct_model_names
| 849,475 |
overwrites the standard dictionary and checks if value is valid
Args:
key: dictionary key
value: dictionary value
|
def __setitem__(self, key, value):
message = "{0} (of type {1}) is not in {2}".format(str(value), type(value), str(self.valid_values[key]))
assert self.is_valid(value, self.valid_values[key]), message
if isinstance(value, dict) and len(self)>0 and len(self) == len(self.valid_values):
for k, v in value.items():
self[key].update({k:v})
else:
super(Parameter, self).__setitem__(key, value)
| 849,497 |
check if the value is valid
Args:
value: value to be tested
valid_values: allowed valid values (type or list of values)
Returns:
|
def is_valid(value, valid_values):
valid = False
if isinstance(valid_values, type) and type(value) is valid_values:
valid = True
elif isinstance(valid_values, type) and valid_values == float and type(value) == int:
#special case to allow ints as float inputs
valid = True
elif isinstance(value, dict) and isinstance(valid_values, dict):
# check that all values actually exist in valid_values
assert set(value.keys()) & set(valid_values.keys()) == set(value.keys())  # value's keys must be a subset of valid_values' keys
for k, v in value.items():
valid = Parameter.is_valid(v, valid_values[k])
if not valid:
break
elif isinstance(value, dict) and valid_values == Parameter:
valid = True
elif isinstance(valid_values, list) and value in valid_values:
valid = True
return valid
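# Hedged usage sketch (illustrative; not from the original source):
# >>> Parameter.is_valid(3, int)                         # True: exact type match
# >>> Parameter.is_valid(3, float)                       # True: ints accepted as floats
# >>> Parameter.is_valid('b', ['a', 'b'])                # True: membership in a value list
# >>> Parameter.is_valid({'x': 1}, {'x': int, 'y': int}) # True: keys are a subset, values valid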
| 849,499 |
Example of a script
Args:
name (optional): name of script, if empty same as class name
settings (optional): settings for this script, if empty same as default settings
|
def __init__(self, name=None, settings=None,
log_function = None, data_path = None):
Script.__init__(self, name, settings, log_function= log_function, data_path = data_path)
| 849,651 |
plots the data only the axes objects that are provided in axes_list
Args:
axes_list: a list of axes objects, this should be implemented in each subscript
data: data to be plotted if empty take self.data
Returns: None
|
def _plot(self, axes_list, data = None):
plot_type = self.settings['plot_style']
if data is None:
data = self.data
if data is not None and data != {}:  # '!=' rather than 'is not': identity comparison with a dict literal is always True
if plot_type in ('main', 'two'):
if data['random data'] is not None:
axes_list[0].plot(data['random data'])
axes_list[0].hold(False)
if plot_type in ('aux', 'two', '2D'):
if data['random data'] is not None:
axes_list[1].plot(data['random data'])
axes_list[1].hold(False)
if plot_type == '2D':
if 'image data' in data and data['image data'] is not None:
fig = axes_list[0].get_figure()
implot = axes_list[0].imshow(data['image data'], cmap='pink', interpolation="nearest", extent=[-1,1,1,-1])
fig.colorbar(implot, label='kcounts/sec')
| 849,653 |
updates the data in already existing plots. the axes objects are provided in axes_list
Args:
axes_list: a list of axes objects, this should be implemented in each subscript
Returns: None
|
def _update(self, axes_list):
plot_type = self.settings['plot_style']
if plot_type == '2D':
# we expect exactly one image in the axes object (see ScriptDummy.plot)
implot = axes_list[1].get_images()[0]
# now update the data
implot.set_data(self.data['random data'])
colorbar = implot.colorbar
if colorbar is not None:
colorbar.update_bruteforce(implot)
else:
# fall back to default behaviour
Script._update(self, axes_list)
| 849,654 |
Example of a script
Args:
name (optional): name of script, if empty same as class name
settings (optional): settings for this script, if empty same as default settings
|
def __init__(self, instruments = None, scripts = None, name=None, settings=None, log_function = None, data_path = None):
super(ScriptDummyWrapper, self).__init__(name, settings, log_function=log_function, data_path=data_path)
| 849,655 |
Example of a script that emits a QT signal for the gui
Args:
name (optional): name of script, if empty same as class name
settings (optional): settings for this script, if empty same as default settings
|
def __init__(self, instruments, scripts = None, name=None, settings=None, log_function=None, data_path = None):
Script.__init__(self, name, settings=settings, scripts=scripts, instruments=instruments, log_function=log_function, data_path = data_path)
self.data = {'plant_output': deque(maxlen=self.settings['buffer_length']),
'control_output': deque(maxlen=self.settings['buffer_length'])}
| 849,656 |
fills a list view with the names in input_list
Args:
list: QtGui.QListView to fill
input_list: list of strings to display
|
def fill_list(self, list, input_list):
for name in input_list:
item = QtGui.QStandardItem(name)
item.setSelectable(True)
item.setEditable(False)
list.model().appendRow(item)
| 849,677 |
fills a tree with nested parameters
Args:
tree: QtGui.QTreeView
input_dict: dictionary of the form {instrument_name: comma-separated probe names}
Returns:
|
def fill_tree(self, tree, input_dict):
def removeAll(tree):
if tree.model().rowCount() > 0:
for i in range(0, tree.model().rowCount()):
item = tree.model().item(i)
del item
tree.model().removeRows(0, tree.model().rowCount())
tree.model().reset()
def add_probe(tree, instrument, probes):
item = QtGui.QStandardItem(instrument)
item.setEditable(False)
for probe in probes.split(','):
child_name = QtGui.QStandardItem(probe)
child_name.setDragEnabled(True)
child_name.setSelectable(True)
child_name.setEditable(False)
item.appendRow(child_name)
tree.model().appendRow(item)
removeAll(tree)
for index, (instrument, probes) in enumerate(input_dict.items()):
add_probe(tree, instrument, probes)
# tree.setFirstColumnSpanned(index, self.tree_infile.rootIndex(), True)
tree.expandAll()
| 849,749 |
Check if there are any grandparents.
Set the grandparent ids.
Arguments:
mother (Individual): An Individual object that represents the mother
father (Individual): An Individual object that represents the father
|
def check_grandparents(self, mother = None, father = None):
if mother:
if mother.mother != '0':
self.grandparents[mother.mother] = ''
elif mother.father != '0':
self.grandparents[mother.father] = ''
if father:
if father.mother != '0':
self.grandparents[father.mother] = ''
elif father.father != '0':
self.grandparents[father.father] = ''
return
| 849,751 |
returns all the packages in the module
Args:
module_name: name of module
Returns:
list: the names of all sub-modules found in the package
|
def explore_package(module_name):
packages = []
loader = pkgutil.get_loader(module_name)
for sub_module in pkgutil.walk_packages([os.path.dirname(loader.get_filename())],
prefix=module_name + '.'):
_, sub_module_name, _ = sub_module
packages.append(sub_module_name)
return packages
| 849,773 |
gets the value for "name" from "path_to_file" config file
Args:
name: name of variable in config file
path_to_file: path to config file
Returns: path to dll if name exists in the file; otherwise, returns None
|
def get_config_value(name, path_to_file='config.txt'):
# if the function is called from gui then the file has to be located with respect to the gui folder
if not os.path.isfile(path_to_file):
path_to_file = os.path.join('../instruments/', path_to_file)
path_to_file = os.path.abspath(path_to_file)
if not os.path.isfile(path_to_file):
print(('path_to_file', path_to_file))
#raise IOError('{:s}: config file is not valid'.format(path_to_file))
return None
f = open(path_to_file, 'r')
string_of_file_contents = f.read()
if name[-1] != ':':
name += ':'
if name not in string_of_file_contents:
return None
else:
config_value = [line.split(name)[1] for line in string_of_file_contents.split('\n')
if len(line.split(name)) > 1][0].strip()
return config_value
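# Hedged usage sketch (hypothetical config contents; not from the original source):
# if config.txt contains the line
#   dll_path: C:/drivers/device.dll
# then
# >>> get_config_value('dll_path', 'config.txt')
# 'C:/drivers/device.dll'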
| 849,781 |
loads a .b26 file into a dictionary
Args:
file_name:
Returns: dictionary with keys instrument, scripts, probes
|
def load_b26_file(file_name):
assert os.path.exists(file_name)
with open(file_name, 'r') as infile:
data = yaml.safe_load(infile)
return data
| 849,782 |
save instruments, scripts and probes as a json file
Args:
filename:
instruments:
scripts:
probes: dictionary of the form {instrument_name : probe_1_of_instrument, probe_2_of_instrument, ...}
Returns:
|
def save_b26_file(filename, instruments=None, scripts=None, probes=None, overwrite=False, verbose=False):
# if overwrite is false load existing data and append to new instruments
if os.path.isfile(filename) and not overwrite:
data_dict = load_b26_file(filename)
else:
data_dict = {}
if instruments is not None:
if 'instruments' in data_dict:
data_dict['instruments'].update(instruments)
else:
data_dict['instruments'] = instruments
if scripts is not None:
if 'scripts' in data_dict:
data_dict['scripts'].update(scripts)
else:
data_dict['scripts'] = scripts
if probes is not None:
probe_instruments = list(probes.keys())
if 'probes' in data_dict:
# all the instruments required for old and new probes
probe_instruments= set(probe_instruments + list(data_dict['probes'].keys()))
else:
data_dict.update({'probes':{}})
for instrument in probe_instruments:
if instrument in data_dict['probes'] and instrument in probes:
# update the data_dict
data_dict['probes'][instrument] = ','.join(set(data_dict['probes'][instrument].split(',') + probes[instrument].split(',')))
else:
data_dict['probes'].update(probes)
if verbose:
print(('writing ', filename))
if data_dict != {}:
# if platform == 'Windows':
# # windows can't deal with long filenames so we have to use the prefix '\\\\?\\'
# if len(filename.split('\\\\?\\')) == 1:
# filename = '\\\\?\\'+ filename
# create folder if it doesn't exist
if verbose:
print(('filename', filename))
print(('exists', os.path.exists(os.path.dirname(filename))))
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as outfile:
json.dump(data_dict, outfile, indent=4)
| 849,783 |
NOT IMPLEMENTED YET
tries to instantiate all the instruments that are imported in /instruments/__init__.py
and the probes of each instrument that could be instantiated into a .b26 file in the folder path
Args:
path: target path for .b26 files
|
def export_default_probes(path, module_name = '', raise_errors = False):
raise NotImplementedError
import b26_toolkit.b26_toolkit.instruments as instruments
from pylabcontrol.core import Probe
for name, obj in inspect.getmembers(instruments):
if inspect.isclass(obj):
try:
instrument = obj()
print(('--- created ', obj.__name__, ' -- '))
for probe_name, probe_info in instrument._PROBES.items():
probe = Probe(instrument, probe_name, info = probe_info)
filename = os.path.join(path, '{:s}.b26'.format(instrument.name))
probe.save(filename)
except:
print(('failed to create probe file for: {:s}'.format(obj.__name__)))
| 849,794 |
tries to instantiate all the scripts that are imported in /scripts/__init__.py
saves each script that could be instantiated into a .b26 file in the folder path
Args:
target_folder: target path for .b26 files
source_folder: location of python script files
|
def export_default_scripts(target_folder, source_folder = None, raise_errors = False, verbose=False):
scripts_to_load = get_classes_in_folder(source_folder, Script)
if verbose:
print(('attempt to load {:d} scripts: '.format(len(scripts_to_load))))
loaded_scripts, failed, loaded_instruments = Script.load_and_append(scripts_to_load, raise_errors=raise_errors)
for name, value in loaded_scripts.items():
filename = os.path.join(target_folder, '{:s}.b26'.format(name))
value.save_b26(filename)
if verbose:
print('\n================================================')
print('================================================')
print(('saved {:d} scripts, {:d} failed'.format(len(loaded_scripts), len(failed))))
if failed != {}:
for error_name, error in failed.items():
print(('failed to create script: ', error_name, error))
| 849,795 |
Create a new OEmbedEndpoint object.
Args:
url: The url of a provider API (API endpoint).
urlSchemes: A list of URL schemes for this endpoint.
|
def __init__(self, url, urlSchemes=None):
self._urlApi = url
self._urlSchemes = {}
self._initRequestHeaders()
self._urllib = urllib2
if urlSchemes is not None:
for urlScheme in urlSchemes:
self.addUrlScheme(urlScheme)
self._implicitFormat = self._urlApi.find('{format}') != -1
| 849,848 |
Add a url scheme to this endpoint. It takes a url string and creates
the OEmbedUrlScheme object internally.
Args:
url: The url string that represents a url scheme to add.
|
def addUrlScheme(self, url):
#@TODO: validate invalid url format according to http://oembed.com/
if not isinstance(url, str):
raise TypeError('url must be a string value')
if url not in self._urlSchemes:
self._urlSchemes[url] = OEmbedUrlScheme(url)
| 849,849 |
Try to find if url matches against any of the schemes within this
endpoint.
Args:
url: The url to match against each scheme
Returns:
True if a matching scheme was found for the url, False otherwise
|
def match(self, url):
try:
urlSchemes = self._urlSchemes.itervalues() # Python 2
except AttributeError:
urlSchemes = self._urlSchemes.values() # Python 3
for urlScheme in urlSchemes:
if urlScheme.match(url):
return True
return False
| 849,850 |
Format the input url and optional parameters, and provide the final url
from which to get the given resource.
Args:
url: The url of an OEmbed resource.
**opt: Parameters passed to the url.
Returns:
The complete url of the endpoint and resource.
|
def request(self, url, **opt):
params = opt
params['url'] = url
urlApi = self._urlApi
if 'format' in params and self._implicitFormat:
urlApi = self._urlApi.replace('{format}', params['format'])
del params['format']
if '?' in urlApi:
return "%s&%s" % (urlApi, urllib.urlencode(params))
else:
return "%s?%s" % (urlApi, urllib.urlencode(params))
| 849,851 |
Convert the resource url to a complete url and then fetch the
data from it.
Args:
url: The url of an OEmbed resource.
**opt: Parameters passed to the url.
Returns:
OEmbedResponse object according to data fetched
|
def get(self, url, **opt):
return self.fetch(self.request(url, **opt))
| 849,852 |
Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched
|
def fetch(self, url):
opener = self._urllib.build_opener()
opener.addheaders = self._requestHeaders.items()
response = opener.open(url)
headers = response.info()
raw = response.read()
raw = raw.decode('utf8')
if 'Content-Type' not in headers:
raise OEmbedError('Missing mime-type in response')
if headers['Content-Type'].find('application/xml') != -1 or \
headers['Content-Type'].find('text/xml') != -1:
response = OEmbedResponse.newFromXML(raw)
elif headers['Content-Type'].find('application/json') != -1 or \
headers['Content-Type'].find('text/javascript') != -1 or \
headers['Content-Type'].find('text/json') != -1:
response = OEmbedResponse.newFromJSON(raw)
else:
raise OEmbedError('Invalid mime-type in response - %s' % headers['Content-Type'])
return response
| 849,853 |
Get an OEmbedResponse from one of the providers configured in this
consumer according to the resource url.
Args:
url: The url of the resource to get.
format: Desired response format.
**opt: Optional parameters to pass in the url to the provider.
Returns:
OEmbedResponse object.
|
def embed(self, url, format='json', **opt):
if format not in ['json', 'xml']:
raise OEmbedInvalidRequest('Format must be json or xml')
opt['format'] = format
return self._request(url, **opt)
| 849,857 |
figures out the iterator type based on the script settings and (optionally) subscripts
Args:
script_settings: iterator_type
subscripts: subscripts
Returns:
str: the iterator type, either 'loop' or 'sweep'
|
def get_iterator_type(script_settings, subscripts={}):
if 'iterator_type' in script_settings:
# figure out the iterator type
if script_settings['iterator_type'] == 'Loop':
iterator_type = 'loop'
elif script_settings['iterator_type'] == 'Parameter Sweep':
iterator_type = 'sweep'
else:
raise TypeError('unknown iterator type')
else:
# assign the correct iterator script type
if 'sweep_param' in script_settings:
iterator_type = 'sweep'
elif 'num_loops' in script_settings:
iterator_type = 'loop'
else:
raise TypeError('unknown iterator type')
return iterator_type
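# Hedged usage sketch (illustrative settings dicts; not from the original source):
# >>> get_iterator_type({'iterator_type': 'Parameter Sweep'})
# 'sweep'
# >>> get_iterator_type({'num_loops': 10})
# 'loop'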
| 849,861 |
this function takes care of signals emitted by the subscripts
Args:
progress_subscript: progress of subscript
|
def _receive_signal(self, progress_subscript):
self.progress = self._estimate_progress()
self.updateProgress.emit(int(self.progress))
| 849,863 |
Connect to the Herkulex bus
Connect to the serial port to which the Herkulex servos are attached
Args:
portname (str): The serial port name
baudrate (int): The serial port baudrate
Raises:
HerkulexError: Error occurred while opening the serial port
|
def connect(portname, baudrate):
global SERPORT
try:
SERPORT = serial.Serial(portname, baudrate, timeout = 0.1)
except:
raise HerkulexError("could not open the serial port")
| 849,868 |
Calculate Checksum 1
Calculate the checksum 1 required for the herkulex data packet
Args:
data (list): the data of which checksum is to be calculated
stringlength (int): the length of the data
Returns:
int: The calculated checksum 1
|
def checksum1(data, stringlength):
value_buffer = 0
for count in range(0, stringlength):
value_buffer = value_buffer ^ data[count]
return value_buffer&0xFE
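# Hedged worked example (not from the original source): checksum1 XORs all the
# packet bytes and masks out the lowest bit:
# >>> hex(checksum1([0x0A, 0xFD, 0x03, 0x06, 0x01, 0x40], 6))
# '0xb2'   # (0x0A ^ 0xFD ^ 0x03 ^ 0x06 ^ 0x01 ^ 0x40) & 0xFE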
| 849,869 |
Send data to herkulex
Packetize & write the packet to the serial port
Args:
data (list): the data to be sent
Raises:
HerkulexError: Error occurred while writing to the serial port
|
def send_data(data):
datalength = len(data)
csm1 = checksum1(data, datalength)
csm2 = checksum2(csm1)
data.insert(0, 0xFF)
data.insert(1, 0xFF)
data.insert(5, csm1)
data.insert(6, csm2)
stringtosend = ""
for i in range(len(data)):
byteformat = '%02X' % data[i]
stringtosend = stringtosend + "\\x" + byteformat
try:
SERPORT.write(stringtosend.decode('string-escape'))
#print stringtosend
except:
raise HerkulexError("could not communicate with motors")
| 849,870 |
Clears the errors register of all Herkulex servos
Args:
none
|
def clear_errors():
data = []
data.append(0x0B)
data.append(BROADCAST_ID)
data.append(RAM_WRITE_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE2)
data.append(0x00)
data.append(0x00)
send_data(data)
| 849,871 |
Scan for the herkulex servos connected
This function will scan for all the herkulex servos connected
to the bus.
Args:
none
Returns:
list: a list of tuples of the form [(id, model)]
|
def scan_servos():
servos = []
for servo_id in range(0x00, 0xFE):
model = get_model(servo_id)
if model:
servos += [(servo_id, model)]
return servos
| 849,873 |
Get the servo model
This function gets the model of the herkulex servo, provided its id
Args:
servoid(int): the id of the servo
Returns:
int: an integer corresponding to the model number
0x06 for DRS-602
0x04 for DRS-402
0x02 for DRS-202
|
def get_model(servoid):
data = []
data.append(0x09)
data.append(servoid)
data.append(EEP_READ_REQ)
data.append(MODEL_NO1_EEP)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
| 849,874 |
Get the error status of servo
This function gets the error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
|
def get_servo_status(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
| 849,875 |
Get the detailed error status of servo
This function gets the detailed error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
|
def get_servo_status_detail(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_DETAIL_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except HerkulexError:
raise HerkulexError("could not communicate with motors")
| 849,876 |
Set the LED Color of Herkulex
Args:
colorcode (int): The code for colors
(0x00-OFF
0x02-BLUE
0x03-CYAN
0x04-RED
0x05-ORANGE
0x06-VIOLET
0x07-WHITE)
|
def set_led(self, colorcode):
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(LED_CONTROL_RAM)
data.append(0x01)
data.append(colorcode)
send_data(data)
| 849,877 |
Set the Brakes of Herkulex
In braked mode, position control and velocity control
will not work, enable torque before that
Args:
none
|
def brake_on(self):
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x40)
send_data(data)
| 849,878 |
Set the torques of Herkulex to zero
In this mode, position control and velocity control
will not work, enable torque before that. Also the
servo shaft is freely movable
Args:
none
|
def torque_off(self):
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x00)
send_data(data)
| 849,879 |
Enable the torques of Herkulex
In this mode, position control and velocity control
will work.
Args:
none
|
def torque_on(self):
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x60)
send_data(data)
| 849,880 |
Set the position of Herkulex
Enable torque using torque_on function before calling this
Args:
goalposition (int): The desired position, min-0 & max-1023
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
|
def set_servo_position(self, goalposition, goaltime, led):
goalposition_msb = int(goalposition) >> 8
goalposition_lsb = int(goalposition) & 0xff
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalposition_lsb)
data.append(goalposition_msb)
data.append(led)
data.append(self.servoid)
data.append(goaltime)
send_data(data)
| 849,882 |
Gets the current position of Herkulex
Args:
none
Returns:
int: position of the servo- 0 to 1023
Raises:
SerialException: Error occurred while opening serial port
|
def get_servo_position(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(CALIBRATED_POSITION_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if (self.servomodel==0x06) or (self.servomodel == 0x04):
return ((ord(rxdata[10])&0xff)<<8) | (ord(rxdata[9])&0xFF)
else:
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
except HerkulexError:
print("Could not read from the servos. Check connection")
| 849,883 |
Gets the current temperature of Herkulex
Args:
none
Returns:
int: the current temperature register of Herkulex
Raises:
HerkulexError: Error occurred while reading from the serial port
|
def get_servo_temperature(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(TEMPERATURE_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return ord(rxdata[9])
except HerkulexError:
raise HerkulexError("Could not communicate with motors")
| 849,884 |
Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
HerkulexError: Error occurred while reading from the serial port
|
def get_servo_torque(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(PWM_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if ord(rxdata[10])<=127:
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
else:
return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF
except HerkulexError:
raise HerkulexError("could not communicate with motors")
| 849,885 |
Set the Herkulex in continuous rotation mode
Args:
goalspeed (int): the speed , range -1023 to 1023
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
|
def set_servo_speed(self, goalspeed, led):
if goalspeed >= 0:  # includes zero, so goalspeed_msb/lsb are always defined
goalspeed_msb = (int(goalspeed)& 0xFF00) >> 8
goalspeed_lsb = int(goalspeed) & 0xff
else:
goalspeed_msb = 64+(255- ((int(goalspeed)& 0xFF00) >> 8))
goalspeed_lsb = (abs(goalspeed) & 0xff)
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalspeed_lsb)
data.append(goalspeed_msb)
data.append(0x02|led)
data.append(self.servoid)
data.append(0x00)
send_data(data)
| 849,886 |
Set the P gain of the position PID
Args:
pvalue (int): P value
|
def set_position_p(self, pvalue):
pvalue_msb = int(pvalue) >> 8
pvalue_lsb = int(pvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KP_RAM)
data.append(BYTE2)
data.append(pvalue_lsb)
data.append(pvalue_msb)
send_data(data)
| 849,887 |
Set the I gain of the position PID
Args:
ivalue (int): I value
|
def set_position_i(self, ivalue):
ivalue_msb = int(ivalue) >> 8
ivalue_lsb = int(ivalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
data.append(ivalue_lsb)
data.append(ivalue_msb)
send_data(data)
| 849,888 |
Set the D gain of the PID
Args:
dvalue (int): D value
|
def set_position_d(self, dvalue):
dvalue_msb = int(dvalue) >> 8
dvalue_lsb = int(dvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
data.append(dvalue_lsb)
data.append(dvalue_msb)
send_data(data)
| 849,889 |
Sets the servo angle (in degrees)
Enable torque using torque_on function before calling this
Args:
goalangle (int): The desired angle in degrees, range -150 to 150
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
|
def set_servo_angle(self, goalangle, goaltime, led):
if (self.servomodel==0x06) or (self.servomodel == 0x04):
goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)
else:
goalposition = scale(goalangle, -150, 150, 21, 1002)
self.set_servo_position(goalposition, goaltime, led)
| 849,894 |
Gets the current angle of the servo in degrees
Args:
none
Returns:
int : the current servo angle
|
def get_servo_angle(self):
servoposition = self.get_servo_position()
if (self.servomodel==0x06) or (self.servomodel == 0x04):
return scale(servoposition, 10627, 22129, -159.9, 159.6)
else:
return scale(servoposition, 21, 1002, -150, 150)
| 849,895 |
executes scripts and stores script parameters and settings
Args:
name (optional): name of script, if not provided take name of function
settings (optional): a Parameter object that contains all the information needed in the script
instruments (optional): instruments used in the script
scripts (optional): sub_scripts used in the script
log_function(optional): function reference that takes a string
|
def __init__(self, name=None, settings=None, instruments=None, scripts=None, log_function=None, data_path=None):
QObject.__init__(self)
self._script_class = self.__class__.__name__
if name is None:
name = self.__class__.__name__
self.name = name
self._instruments = {}
if instruments is None:
instruments = {}
else:
assert isinstance(instruments, dict)
assert set(self._INSTRUMENTS.keys()) <= set(instruments.keys())
self.data_path = data_path
self.instruments = {key: instruments[key] for key in list(self._INSTRUMENTS.keys())}
self._scripts = {}
if scripts is None:
scripts = {}
self.scripts = scripts
# set end time to be before start time; if the script hasn't been executed, this tells us so
self.start_time = datetime.datetime.now()
self.end_time = self.start_time - datetime.timedelta(seconds=1)
self._settings = deepcopy(Parameter(self._DEFAULT_SETTINGS + Script._DEFAULT_SETTINGS))
self._settings.update({'tag':self.name.lower()})
if settings is not None:
self.update(settings)
self._abort = False
self.is_running = False
# data hold the data generated by the script,
# this should either be a dictionary or a deque of dictionaries
self.data = {}
# a log for status outputs
self.log_data = deque()
# this can be overwritten
self.log_function = log_function
# default value is 'none', overwrite this in script if it has plotting capabilities
self._plot_refresh = True
self.progress = None
self._current_subscript_stage = {
'current_subscript': None,
'subscript_exec_count':{},
'subscript_exec_duration':{}
}
| 849,921 |
updates the internal dictionary
Args:
settings: parameters to be set
# maybe in the future:
# Returns: boolean that is true if update successful
|
def update(self, settings):
if 'settings' in settings:
self._settings.update(settings['settings'])
else:
self._settings.update(settings)
if 'instruments' in settings:
for instrument_name, instrument_setting in settings['instruments'].items():
self.instruments[instrument_name]['settings'].update(instrument_setting['settings'])
if 'scripts' in settings:
for script_name, script_setting in settings['scripts'].items():
self.scripts[script_name].update(script_setting)
| 849,928 |
this function takes care of signals emitted by the subscripts
the default behaviour is that it just reemits the signal
Args:
progress: progress of subscript
|
def _receive_signal(self, progress):
self.progress = progress
self.updateProgress.emit(progress)
| 849,932 |
creates a filename based on the script start time and tag
Args:
appendix: appendix for file
Returns: filename
|
def filename(self, appendix=None, create_if_not_existing=False):
# if provided path is a relative path and self.data_path exists, build path
if os.path.isabs(self.settings['path']) == False and self.data_path is not None:
path = os.path.join(self.data_path, self.settings['path'])
else:
path = self.settings['path']
tag = self.settings['tag']#.replace('.','-')
filename = os.path.join(path, "{:s}_{:s}".format(self.start_time.strftime('%y%m%d-%H_%M_%S'),tag))
if os.path.exists(filename) == False and create_if_not_existing:
os.makedirs(filename)
if appendix is not None:
filename = os.path.join(filename, "{:s}_{:s}{:s}".format(self.start_time.strftime('%y%m%d-%H_%M_%S'),tag,appendix))
# windows can't deal with long filenames so we have to use the prefix '\\\\?\\'
# if len(filename.split('\\\\?\\')) == 1:
# filename = '\\\\?\\' + filename
return filename
| 849,935 |
saves the instance of the script to a file using pickle
Args:
filename: target filename
|
def save(self, filename):
if filename is None:
filename = self.filename('.b26s')
# if len(filename.split('\\\\?\\')) == 1:
# filename = '\\\\?\\' + filename
with open(filename, 'wb') as outfile:  # binary mode: pickle.dumps returns bytes
outfile.write(pickle.dumps(self.__dict__))
| 849,939 |
loads an script instance using pickle
Args:
filename: source filename
instruments:
optional - only needed if script requires instruments
dictionary of form
instruments = {
name_of_instrument_1 : instance_of_instrument_1,
name_of_instrument_2 : instance_of_instrument_2,
...
}
Returns:
script_instance
updated_instruments
|
def load(filename, instruments = None):
with open(filename, 'rb') as infile:  # binary mode: pickled data is bytes
dataPickle = infile.read()
script_as_dict = pickle.loads(dataPickle)
script_class = script_as_dict['_script_class']
script_instance, _, updated_instruments = Script.load_and_append({'script': script_class}, instruments = instruments)
script_instance = script_instance['script']
# save references to instruments
instruments = script_instance._instruments
# update the script instance
script_instance.__dict__ = script_as_dict
# update references to instruments
script_instance._instruments = instruments
return script_instance, updated_instruments
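A round-trip sketch (filename and variable names are illustrative):
# hypothetical round trip: save a script instance, then restore it with its instruments
my_script.save('snapshot.b26s')
restored_script, updated_instruments = Script.load('snapshot.b26s', instruments=my_instruments)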
| 849,940 |
loads the data that has been saved with Script.save.
Args:
path: path to folder saved by Script.save or raw_data folder within
Returns:
a dictionary with the data of form
data = {param_1_name: param_1_data, ...}
|
def load_data(path):
# check that path exists
if not os.path.exists(path):
print(path)
raise AttributeError('Path given does not exist!')
# windows can't deal with long filenames (>260 chars) so we have to use the prefix '\\\\?\\'
# if len(path.split('\\\\?\\')) == 1:
# path = '\\\\?\\' + os.path.abspath(path)
# if raw_data folder exists, get a list of directories from within it; otherwise, get names of all .csv files in
# current directory
data = {}
# if self.RAW_DATA_DIR in os.listdir(path): #8/26/16 AK: self not defined in static context
# data_files = os.listdir(os.path.join(path, self.RAW_DATA_DIR + '/'))
# path = os.path.join(path, self.RAW_DATA_DIR + '/')
#
# else:
if 'raw_data' in os.listdir(path): #temporarily hardcoded
data_files = os.listdir(os.path.join(path, 'raw_data' + '/'))
path = os.path.join(path, 'raw_data' + '/')
else:
data_files = glob.glob(os.path.join(path, '*.csv'))
# If no data files were found, raise error
if not data_files:
raise AttributeError('Could not find data files in {:s}'.format(path))
# import data from each csv
for data_file in data_files:
# get data name, read the data from the csv, and save it to dictionary
        data_name = data_file.split('-')[-1][0:-4] # JG: why do we strip off the date?
imported_data_df = pd.read_csv(os.path.join(path, data_file))
        # check if there are real headers; if the headers are digits then we ignore them because they are just indices
        # real headers are strings (however, digits are also of type str! that's why we use the isdigit method)
column_headers = list(imported_data_df.columns.values)
        if sum([int(x.isdigit()) for x in column_headers]) != len(column_headers):
            data[data_name] = {h: imported_data_df[h].values for h in column_headers}
        else:
            # note, np.squeeze removes extraneous length-1 dimensions from the array returned by the dataframe
            data[data_name] = np.squeeze(imported_data_df.values)
return data
| 849,941 |
wrapper to get the module for a script
Args:
script_information: information of the script. This can be
- a dictionary
- a Script instance
- name of Script class
package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit; only used when script_information is a string
Returns:
module
|
def get_script_module(script_information, package='pylabcontrol', verbose=False):
module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)
return module
| 849,942 |
plots the data contained in self.data, which should be a dictionary or a deque of dictionaries;
for the latter, the last entry is used
Args:
figure_list: list of figure objects that are passed to self.get_axes_layout to get axis objects for plotting
|
def plot(self, figure_list):
    # if there is no data we do not plot anything
if not self.data:
return
# if plot function is called when script is not running we request a plot refresh
if not self.is_running:
self._plot_refresh = True
axes_list = self.get_axes_layout(figure_list)
if self._plot_refresh is True:
self._plot(axes_list)
self._plot_refresh = False
for figure in figure_list:
if figure.axes:
figure.set_tight_layout(True)
else:
self._update_plot(axes_list)
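A minimal sketch of the two hooks a child script provides (the method names follow the calls above; the plotted quantity is hypothetical):
# hypothetical child-script hooks: expensive full redraw vs. cheap in-place update
def _plot(self, axes_list):
    self.line, = axes_list[0].plot(self.data['counts'])

def _update_plot(self, axes_list):
    self.line.set_ydata(self.data['counts'])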
| 849,944 |
returns the axes objects the script needs to plot its data
the default creates a single axes object on each figure
This can/should be overwritten in a child script if more axes objects are needed
Args:
figure_list: a list of figure objects
Returns:
axes_list: a list of axes objects
|
def get_axes_layout(self, figure_list):
axes_list = []
if self._plot_refresh is True:
for fig in figure_list:
fig.clf()
axes_list.append(fig.add_subplot(111))
else:
for fig in figure_list:
axes_list.append(fig.axes[0])
return axes_list
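An override sketch for a hypothetical child script that wants two stacked axes on the first figure:
# hypothetical override: two subplots on the first figure
def get_axes_layout(self, figure_list):
    if self._plot_refresh:
        figure_list[0].clf()
        return [figure_list[0].add_subplot(211), figure_list[0].add_subplot(212)]
    return figure_list[0].axes[:2]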
| 849,945 |
function is overloaded:
- read_probes()
- read_probes(key)
Args:
key: name of requested value
Returns:
- if called without argument: returns the values of all probes in dictionary form
- if called with argument: returns the value of the requested key
|
def read_probes(self, key = None):
if key is None:
# return the value all probe in dictionary form
d = {}
for k in list(self._PROBES.keys()):
d[k] = self.read_probes(k)
return d
else:
# return the value of the requested key if the key corresponds to a valid probe
assert key in list(self._PROBES.keys())
        # placeholder: the actual probe readout is device-specific and left to subclasses
        value = None
        return value
| 849,992 |
allows reading instrument inputs in the form value = instrument.input
Args:
name: name of input channel
Returns: value of input channel
|
def __getattr__(self, name):
    if not str(name) in ['_initialized', '_settings']:
        try:
            return self.read_probes(name)
        except Exception:
            # restores standard behavior for missing keys
            print(('class ' + type(self).__name__ + ' has no attribute ' + str(name)))
            raise AttributeError('class ' + type(self).__name__ + ' has no attribute ' + str(name))
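A usage sketch (the probe name is hypothetical); attribute access falls through to read_probes:
# hypothetical probe read via attribute access
power = my_instrument.output_power  # equivalent to my_instrument.read_probes('output_power')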
| 849,993 |
Split a string by the given split characters, space (' ') and tab ('\t') by default
Examples:
>>> StringClass.split_string('exec -ini test.ini', ' ')
['exec', '-ini', 'test.ini']
Args:
str_src: source string
spliters: e.g. [' ', '\t'], [], ' ', None
elim_empty: Eliminate empty (i.e., '') or not.
Returns:
split sub-strings as list
|
def split_string(str_src, spliters=None, elim_empty=False):
# type: (AnyStr, Union[AnyStr, List[AnyStr], None], bool) -> List[AnyStr]
if is_string(spliters):
spliters = [spliters]
if spliters is None or not spliters:
spliters = [' ', '\t']
dest_strs = list()
src_strs = [str_src]
while True:
old_dest_strs = src_strs[:]
for s in spliters:
for src_s in src_strs:
temp_strs = src_s.split(s)
for temp_s in temp_strs:
temp_s = temp_s.strip()
if temp_s == '' and elim_empty:
continue
if is_string(temp_s):
temp_s = str(temp_s)
dest_strs.append(temp_s)
src_strs = dest_strs[:]
dest_strs = list()
if old_dest_strs == src_strs:
dest_strs = src_strs[:]
break
return dest_strs
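A usage sketch; the results follow from the loop above:
# illustrative calls
StringClass.split_string('a,b; c', [',', ';'])  # -> ['a', 'b', 'c']
StringClass.split_string('exec -ini test.ini')  # -> ['exec', '-ini', 'test.ini']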
| 850,100 |
get file names with the given suffixes in the given directory
Args:
dir_src: directory path
suffixes: wanted suffixes list; each suffix may be given with or without a leading '.'
Returns:
file names with the given suffixes as list
|
def get_filename_by_suffixes(dir_src, suffixes):
# type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]]
list_files = os.listdir(dir_src)
re_files = list()
if is_string(suffixes):
suffixes = [suffixes]
if not isinstance(suffixes, list):
return None
for i, suf in enumerate(suffixes):
if len(suf) >= 1 and suf[0] != '.':
suffixes[i] = '.' + suf
for f in list_files:
name, ext = os.path.splitext(f)
if StringClass.string_in_list(ext, suffixes):
re_files.append(f)
return re_files
| 850,111 |
get full file names with the given suffixes in the given directory
Args:
dir_src: directory path
suffixes: wanted suffixes
Returns:
full file names with the given suffixes as list
|
def get_full_filename_by_suffixes(dir_src, suffixes):
# type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]]
file_names = FileClass.get_filename_by_suffixes(dir_src, suffixes)
if file_names is None:
return None
return list(dir_src + os.sep + name for name in file_names)
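A usage sketch (directory and suffixes are illustrative; the leading '.' is optional thanks to the normalization above):
# illustrative usage of both helpers
names = FileClass.get_filename_by_suffixes('/data/rasters', ['tif', '.asc'])
paths = FileClass.get_full_filename_by_suffixes('/data/rasters', 'tif')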
| 850,112 |
Execute an external command and return the output lines as a list. On Windows, see
`handling-subprocess-crash-in-windows`_.
Args:
commands: string or list
Returns:
output lines
.. _handling-subprocess-crash-in-windows:
https://stackoverflow.com/questions/5069224/handling-subprocess-crash-in-windows
|
def run_command(commands):
# type: (Union[AnyStr, List[AnyStr]]) -> List[AnyStr]
use_shell = False
subprocess_flags = 0
startupinfo = None
if sysstr == 'Windows':
if isinstance(commands, list):
commands = ' '.join(str(c) for c in commands)
import ctypes
SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN
ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
subprocess_flags = 0x8000000 # win32con.CREATE_NO_WINDOW?
# this startupinfo structure prevents a console window from popping up on Windows
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # capture stderr together with stdout (stderr=subprocess.STDOUT below)
else: # for Linux/Unix OS, commands is better to be a list.
if is_string(commands):
use_shell = True
# https://docs.python.org/2/library/subprocess.html
# Using shell=True can be a security hazard.
elif isinstance(commands, list):
        # the executable path may be enclosed in quotes; on non-Windows systems, strip them
if commands[0][0] == commands[0][-1] == '"' or \
commands[0][0] == commands[0][-1] == "'":
commands[0] = commands[0][1:-1]
for idx, v in enumerate(commands):
if isinstance(v, int) or isinstance(v, float):
# Fix :TypeError: execv() arg 2 must contain only strings
commands[idx] = repr(v)
print(commands)
process = subprocess.Popen(commands, shell=use_shell, stdout=subprocess.PIPE,
stdin=open(os.devnull),
stderr=subprocess.STDOUT, universal_newlines=True,
startupinfo=startupinfo,
creationflags=subprocess_flags)
out, err = process.communicate()
recode = process.returncode
if out is None:
return ['']
if recode is not None and recode != 0:
raise subprocess.CalledProcessError(-1, commands,
"ERROR occurred when running subprocess!")
if '\n' in out:
return out.split('\n')
return [out]
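A usage sketch (assuming the staticmethod is exposed on a utility class, as elsewhere in this module; the command is illustrative):
# illustrative call; on Linux/Unix a list form avoids shell=True
lines = UtilClass.run_command(['echo', 'hello'])  # -> ['hello', '']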
| 850,116 |
Draw every pixel's ID
After computing the connectivity of all pixels with the given value, every
pixel has an ID. We then write these pixels' IDs into the not-yet-drawn
raster.
Args:
ID: given ID value
idx_array: array of pixel positions that have the given ID value
drawID_raster: raster before the IDs are drawn
Return:
drawID_raster: raster after the IDs are drawn
|
def draw_ID(ID, idx_array, drawID_raster):
for i in range(idx_array.shape[0]):
x = idx_array[i, 0]
y = idx_array[i, 1]
drawID_raster[x, y] = ID
return drawID_raster
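Since idx_array holds (row, col) pairs, the loop above collapses to a single NumPy fancy-indexing assignment:
# vectorized equivalent of the loop above
drawID_raster[idx_array[:, 0], idx_array[:, 1]] = ID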
| 850,129 |
Add an individual to the family.
Arguments:
individual_object (Individual)
|
def add_individual(self, individual_object):
ind_id = individual_object.individual_id
self.logger.info("Adding individual {0}".format(ind_id))
family_id = individual_object.family
if family_id != self.family_id:
raise PedigreeError(self.family, individual_object.individual_id,
"Family id of individual is not the same as family id for "\
"Family object!")
else:
self.individuals[ind_id] = individual_object
self.logger.debug("Individual {0} added to family {1}".format(
ind_id, family_id
))
return
| 850,289 |
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
|
def get_phenotype(self, individual_id):
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
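A usage sketch (the individual id is hypothetical); the returned codes follow the standard PED convention:
# 0 = unknown, 1 = unaffected, 2 = affected (PED phenotype coding)
status = family.get_phenotype('proband_1')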
| 850,290 |
Plots a dot on top of each selected NV, with a corresponding number denoting the order in which the NVs are
listed.
Precondition: must have an existing image in figure_list[0] to plot over
Args:
figure_list:
|
def plot(self, figure_list):
    # if there is no image data, get it from the current plot
    if self.data and self.data['image_data'] is None:
axes = figure_list[0].axes[0]
if len(axes.images)>0:
self.data['image_data'] = np.array(axes.images[0].get_array())
self.data['extent'] = np.array(axes.images[0].get_extent())
self.plot_settings['cmap'] = axes.images[0].get_cmap().name
self.plot_settings['xlabel'] = axes.get_xlabel()
self.plot_settings['ylabel'] = axes.get_ylabel()
self.plot_settings['title'] = axes.get_title()
self.plot_settings['interpol'] = axes.images[0].get_interpolation()
Script.plot(self, figure_list)
| 850,327 |
Plots a dot on top of each selected NV, with a corresponding number denoting the order in which the NVs are
listed.
Precondition: must have an existing image in figure_list[0] to plot over
Args:
figure_list:
|
def _plot(self, axes_list):
axes = axes_list[0]
if self.plot_settings:
axes.imshow(self.data['image_data'], cmap=self.plot_settings['cmap'], interpolation=self.plot_settings['interpol'], extent=self.data['extent'])
axes.set_xlabel(self.plot_settings['xlabel'])
axes.set_ylabel(self.plot_settings['ylabel'])
axes.set_title(self.plot_settings['title'])
self._update(axes_list)
| 850,328 |
If there is not currently a selected NV within self.settings[patch_size] of pt, adds it to the selected list. If
there is, removes that point from the selected list.
Args:
pt: the point to add or remove from the selected list
Poststate: updates selected list
|
def toggle_NV(self, pt):
if not self.data['nv_locations']: #if self.data is empty so this is the first point
self.data['nv_locations'].append(pt)
self.data['image_data'] = None # clear image data
else:
# use KDTree to find NV closest to mouse click
tree = scipy.spatial.KDTree(self.data['nv_locations'])
#does a search with k=1, that is a search for the nearest neighbor, within distance_upper_bound
d, i = tree.query(pt,k = 1, distance_upper_bound = self.settings['patch_size'])
            # removes NV if previously selected; KDTree.query returns d = inf when no
            # neighbor lies within distance_upper_bound, so test by value, not identity
            if np.isfinite(d):
                self.data['nv_locations'].pop(i)
# adds NV if not previously selected
else:
self.data['nv_locations'].append(pt)
    # if type is not 'free', we compute the full set of locations from the first selected points
if self.settings['type'] == 'square' and len(self.data['nv_locations'])>1:
        # here we create a rectangular grid, where points a and b define the top left
        # and bottom right corners of the rectangle (see the meshgrid sketch after this method)
Nx, Ny = self.settings['Nx'], self.settings['Ny']
pta = self.data['nv_locations'][0]
ptb = self.data['nv_locations'][1]
tmp = np.array([[[pta[0] + 1.0*i*(ptb[0]-pta[0])/(Nx-1), pta[1] + 1.0*j*(ptb[1]-pta[1])/(Ny-1)] for i in range(Nx)] for j in range(Ny)])
self.data['nv_locations'] = np.reshape(tmp, (Nx * Ny, 2))
self.stop()
elif self.settings['type'] == 'line' and len(self.data['nv_locations'])>1:
# here we create a straight line between points a and b
N = self.settings['Nx']
pta = self.data['nv_locations'][0]
ptb = self.data['nv_locations'][1]
self.data['nv_locations'] = [np.array([pta[0] + 1.0*i*(ptb[0]-pta[0])/(N-1), pta[1] + 1.0*i*(ptb[1]-pta[1])/(N-1)]) for i in range(N)]
self.stop()
elif self.settings['type'] == 'ring' and len(self.data['nv_locations'])>1:
        # here we create a circular grid, where points a and b define the center and the outermost ring
Nx, Ny = self.settings['Nx'], self.settings['Ny']
pta = self.data['nv_locations'][0] # center
ptb = self.data['nv_locations'][1] # outermost ring
# radius of outermost ring:
rmax = np.sqrt((pta[0] - ptb[0]) ** 2 + (pta[1] - ptb[1]) ** 2)
# create points on rings
tmp = []
for r in np.linspace(rmax, 0, Ny + 1)[0:-1]:
for theta in np.linspace(0, 2 * np.pi, Nx+1)[0:-1]:
tmp += [[r * np.sin(theta)+pta[0], r * np.cos(theta)+pta[1]]]
self.data['nv_locations'] = np.array(tmp)
self.stop()
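The square-grid branch can be written equivalently with np.meshgrid (same endpoints and ordering, assuming the same Nx, Ny, pta, ptb):
# equivalent construction of the Nx-by-Ny rectangular grid
xs = np.linspace(pta[0], ptb[0], Nx)
ys = np.linspace(pta[1], ptb[1], Ny)
xv, yv = np.meshgrid(xs, ys)  # default 'xy' indexing: arrays of shape (Ny, Nx)
grid = np.column_stack([xv.ravel(), yv.ravel()])  # matches np.reshape(tmp, (Nx * Ny, 2))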
| 850,330 |
loads the data that has been saved with Script.save.
Args:
path: path to folder saved by Script.save or raw_data folder within
verbose: if true print additional information
raise_errors: if true raise errors if false just print to std out
Returns:
a dictionary with the data of form
data = {param_1_name: param_1_data, ...}
|
def load_data(path, verbose=False, raise_errors = False):
# check that path exists
if not os.path.exists(path):
if raise_errors:
raise AttributeError('Path given does not exist!')
else:
print('Path given does not exist!')
return
# windows can't deal with long filenames (>260 chars) so we have to use the prefix '\\\\?\\'
# if len(path.split('\\\\?\\')) == 1:
# path = '\\\\?\\' + os.path.abspath(path)
path = Script.check_filename(path)
if verbose:
print('script path', path)
# if raw_data folder exists, get a list of directories from within it; otherwise, get names of all .csv files in
# current directory
data = {}
# if self.RAW_DATA_DIR in os.listdir(path): #8/26/16 AK: self not defined in static context
# data_files = os.listdir(os.path.join(path, self.RAW_DATA_DIR + '/'))
# path = os.path.join(path, self.RAW_DATA_DIR + '/')
#
# else:
if 'raw_data' in os.listdir(path): #temporarily hardcoded
if verbose:
print('raw_data subfolder found')
data_files = os.listdir(os.path.join(path, 'raw_data' + '/'))
path = os.path.join(path, 'raw_data' + '/')
else:
data_files = glob.glob(os.path.join(path, '*.csv'))
if verbose:
print('data_files found', data_files)
# If no data files were found, raise error
if not data_files:
if raise_errors:
raise AttributeError('Could not find data files in {:s}'.format(path))
else:
print('Could not find data files in {:s}'.format(path))
return
# import data from each csv
for data_file in data_files:
# get data name, read the data from the csv, and save it to dictionary
        data_name = data_file.split('-')[-1][0:-4] # JG: why do we strip off the date?
try:
imported_data_df = pd.read_csv(os.path.join(path, data_file))
            # check if there are real headers; if the headers are digits then we ignore them because they are just indices
            # real headers are strings (however, digits are also of type str! that's why we use the isdigit method)
column_headers = list(imported_data_df.columns.values)
if sum([int(x.isdigit()) for x in column_headers]) != len(column_headers):
data[data_name] = {h: imported_data_df[h].values for h in column_headers}
else:
                # note, np.squeeze removes extraneous length-1 dimensions from the array returned by the dataframe
data[data_name] = np.squeeze(imported_data_df.values)
        except pd.errors.EmptyDataError:
            if raise_errors:
                raise pd.errors.EmptyDataError('data file ' + data_file + ' is empty: did not load!')
            else:
                print('data file ' + data_file + ' is empty: did not load!')
return data
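A usage sketch with an illustrative path:
# returns {data_name: array or dict of column arrays, ...}
data = Script.load_data('/experiments/230105-14_30_00_esr', verbose=True)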
| 850,336 |
loads the settings that have been saved with Script.save_b26.
Args:
path: path to folder saved by Script.save_b26
settings_only: if true, return only the settings when the .b26 file contains a single script
Returns:
a dictionary with the settings
|
def load_settings(path, settings_only=True):
# check that path exists
if not os.path.exists(path):
print(path)
raise AttributeError('Path given does not exist!')
    # folder names have the form 'yymmdd-HH_MM_SS_tag'; drop the first three '_'-separated time fields to recover the tag
    tag = '_'.join(os.path.basename(os.path.dirname(os.path.abspath(path) + '/')).split('_')[3:])
search_str = os.path.abspath(path)+'/*'+tag +'.b26'
fname = glob.glob(search_str)
if len(fname)>1:
print(('warning more than one .b26 file found, loading ', fname[0]))
elif len(fname) == 0:
print(('no .b26 file found in folder {:s}, check path !'.format(search_str)))
return
fname = fname[0]
fname = Script.check_filename(fname)
settings = load_b26_file(fname)['scripts']
    if len(list(settings.keys())) == 1 and settings_only:
settings = settings[list(settings.keys())[0]]['settings']
return settings
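A companion sketch (path illustrative):
# fetch the saved settings dictionary for a script folder
settings = Script.load_settings('/experiments/230105-14_30_00_esr')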
| 850,337 |
TEMPORARY / UNDER DEVELOPMENT
THIS IS TO ALLOW COPYING OF PARAMETERS VIA DRAG AND DROP
Args:
object:
event:
Returns:
|
def eventFilter(self, object, event):
if (object is self.tree_scripts):
# print('XXXXXXX = event in scripts', event.type(),
# QtCore.QEvent.DragEnter, QtCore.QEvent.DragMove, QtCore.QEvent.DragLeave)
if (event.type() == QtCore.QEvent.ChildAdded):
item = self.tree_scripts.selectedItems()[0]
if not isinstance(item.value, Script):
print('ONLY SCRIPTS CAN BE DRAGGED')
return False
print(('XXX ChildAdded', self.tree_scripts.selectedItems()[0].name))
# if event.mimeData().hasUrls():
# event.accept() # must accept the dragEnterEvent or else the dropEvent can't occur !!!
# print "accept"
# else:
# event.ignore()
# print "ignore"
if (event.type() == QtCore.QEvent.ChildRemoved):
print(('XXX ChildRemoved', self.tree_scripts.selectedItems()[0].name))
if (event.type() == QtCore.QEvent.Drop):
print('XXX Drop')
# if event.mimeData().hasUrls(): # if file or link is dropped
# urlcount = len(event.mimeData().urls()) # count number of drops
# url = event.mimeData().urls()[0] # get first url
# object.setText(url.toString()) # assign first url to editline
# # event.accept() # doesnt appear to be needed
return False # lets the event continue to the edit
return False
| 850,355 |
sets the filename to which the probe logging function will write
Args:
checked: boolean (True: opens file) (False: closes file)
|
def set_probe_file_name(self, checked):
    if checked:
        file_name = os.path.join(self.gui_settings['probes_log_folder'], '{:s}_probes.csv'.format(datetime.datetime.now().strftime('%y%m%d-%H_%M_%S')))
        # open for appending in any case; write the header line only when the file is new,
        # so that self.probe_file is always set while logging is enabled
        is_new_file = not os.path.isfile(file_name)
        self.probe_file = open(file_name, 'a')
        if is_new_file:
            new_values = self.read_probes.probes_values
            header = ','.join(list(np.array([['{:s} ({:s})'.format(p, instr) for p in list(p_dict.keys())] for instr, p_dict in new_values.items()]).flatten()))
            self.probe_file.write('{:s}\n'.format(header))
    else:
        self.probe_file.close()
| 850,356 |