text_prompt: string (157 to 13.1k characters)
code_prompt: string (7 to 19.8k characters)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_n_cluster_per_event_hist(cluster_table):
    '''Calculates the number of clusters in every event.

    Parameters
    ----------
    cluster_table : pytables.table

    Returns
    -------
    numpy.histogram
    '''
    logging.info("Histogram number of clusters per event")
    cluster_in_events = analysis_utils.get_n_cluster_in_events(cluster_table)[:, 1]  # get the number of clusters for every event
    return np.histogram(cluster_in_events, bins=range(0, np.max(cluster_in_events) + 2))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_data_statistics(interpreted_files):
    '''Quick and dirty function to give a redmine compatible overview table
    '''
    print '| *File Name* | *File Size* | *Time Stamp* | *Events* | *Bad Events* | *Measurement time* | *# SR* | *Hits* |'  # Mean Tot | Mean rel. BCID
    for interpreted_file in interpreted_files:
        with tb.open_file(interpreted_file, mode="r") as in_file_h5:  # open the actual hit file
            n_hits = np.sum(in_file_h5.root.HistOcc[:])
            measurement_time = int(in_file_h5.root.meta_data[-1]['timestamp_stop'] - in_file_h5.root.meta_data[0]['timestamp_start'])
#             mean_tot = np.average(in_file_h5.root.HistTot[:], weights=range(0, 16) * np.sum(range(0, 16)))  # / in_file_h5.root.HistTot[:].shape[0]
#             mean_bcid = np.average(in_file_h5.root.HistRelBcid[:], weights=range(0, 16))
            n_sr = np.sum(in_file_h5.root.HistServiceRecord[:])
            n_bad_events = int(np.sum(in_file_h5.root.HistErrorCounter[2:]))
            try:
                n_events = str(in_file_h5.root.Hits[-1]['event_number'] + 1)
            except tb.NoSuchNodeError:
                n_events = '~' + str(in_file_h5.root.meta_data[-1]['event_number'] + (in_file_h5.root.meta_data[-1]['event_number'] - in_file_h5.root.meta_data[-2]['event_number']))
            else:
                print '|', os.path.basename(interpreted_file), '|', int(os.path.getsize(interpreted_file) / (1024.0 * 1024.0)), 'MB |', time.ctime(os.path.getctime(interpreted_file)), '|', n_events, '|', n_bad_events, '|', measurement_time, 's |', n_sr, '|', n_hits, '|'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_bad_data(raw_data, prepend_data_headers=None, trig_count=None): """Checking FEI4 raw data array for corrupted data. """
    consecutive_triggers = 16 if trig_count == 0 else trig_count
    is_fe_data_header = logical_and(is_fe_word, is_data_header)
    trigger_idx = np.where(is_trigger_word(raw_data) >= 1)[0]
    fe_dh_idx = np.where(is_fe_data_header(raw_data) >= 1)[0]
    n_triggers = trigger_idx.shape[0]
    n_dh = fe_dh_idx.shape[0]

    # get index of the last trigger
    if n_triggers:
        last_event_data_headers_cnt = np.where(fe_dh_idx > trigger_idx[-1])[0].shape[0]
        if consecutive_triggers and last_event_data_headers_cnt == consecutive_triggers:
            if not np.all(trigger_idx[-1] > fe_dh_idx):
                trigger_idx = np.r_[trigger_idx, raw_data.shape]
            last_event_data_headers_cnt = None
        elif last_event_data_headers_cnt != 0:
            fe_dh_idx = fe_dh_idx[:-last_event_data_headers_cnt]
        elif not np.all(trigger_idx[-1] > fe_dh_idx):
            trigger_idx = np.r_[trigger_idx, raw_data.shape]
    # if any data header, add trigger for histogramming, next readout has to have trigger word
    elif n_dh:
        trigger_idx = np.r_[trigger_idx, raw_data.shape]
        last_event_data_headers_cnt = None
    # no trigger, no data header
    # assuming correct data, return input values
    else:
        return False, prepend_data_headers, n_triggers, n_dh

#     # no triggers, check for the right amount of data headers
#     if consecutive_triggers and prepend_data_headers and prepend_data_headers + n_dh != consecutive_triggers:
#         return True, n_dh, n_triggers, n_dh

    n_triggers_cleaned = trigger_idx.shape[0]
    n_dh_cleaned = fe_dh_idx.shape[0]

    # check that trigger comes before data header
    if prepend_data_headers is None and n_triggers_cleaned and n_dh_cleaned and not trigger_idx[0] < fe_dh_idx[0]:
        return True, last_event_data_headers_cnt, n_triggers, n_dh  # FIXME: 0?
    # check that no trigger comes before the first data header
    elif consecutive_triggers and prepend_data_headers is not None and n_triggers_cleaned and n_dh_cleaned and trigger_idx[0] < fe_dh_idx[0]:
        return True, last_event_data_headers_cnt, n_triggers, n_dh  # FIXME: 0?
    # check for two consecutive triggers
    elif consecutive_triggers is None and prepend_data_headers == 0 and n_triggers_cleaned and n_dh_cleaned and trigger_idx[0] < fe_dh_idx[0]:
        return True, last_event_data_headers_cnt, n_triggers, n_dh  # FIXME: 0?
    elif prepend_data_headers is not None:
        trigger_idx += (prepend_data_headers + 1)
        fe_dh_idx += (prepend_data_headers + 1)
        # for histogramming add trigger at index 0
        trigger_idx = np.r_[0, trigger_idx]
        fe_dh_idx = np.r_[range(1, prepend_data_headers + 1), fe_dh_idx]

    event_hist, bins = np.histogram(fe_dh_idx, trigger_idx)
    if consecutive_triggers is None and np.any(event_hist == 0):
        return True, last_event_data_headers_cnt, n_triggers, n_dh
    elif consecutive_triggers and np.any(event_hist != consecutive_triggers):
        return True, last_event_data_headers_cnt, n_triggers, n_dh
    return False, last_event_data_headers_cnt, n_triggers, n_dh
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_raw_data_file(input_file, start_index=0, limit=200, flavor='fei4b', select=None, tdc_trig_dist=False, trigger_data_mode=0, meta_data_v2=True): """Printing FEI4 data from raw data file for debugging. """
    with tb.open_file(input_file + '.h5', mode="r") as file_h5:
        if meta_data_v2:
            index_start = file_h5.root.meta_data.read(field='index_start')
            index_stop = file_h5.root.meta_data.read(field='index_stop')
        else:
            index_start = file_h5.root.meta_data.read(field='start_index')
            index_stop = file_h5.root.meta_data.read(field='stop_index')
        total_words = 0
        for read_out_index, (index_start, index_stop) in enumerate(np.column_stack((index_start, index_stop))):
            if start_index < index_stop:
                print "\nchunk %d with length %d (from index %d to %d)\n" % (read_out_index, (index_stop - index_start), index_start, index_stop)
                raw_data = file_h5.root.raw_data.read(index_start, index_stop)
                total_words += print_raw_data(raw_data=raw_data, start_index=max(start_index - index_start, 0), limit=limit - total_words, flavor=flavor, index_offset=index_start, select=select, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
                if limit and total_words >= limit:
                    break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_raw_data(raw_data, start_index=0, limit=200, flavor='fei4b', index_offset=0, select=None, tdc_trig_dist=False, trigger_data_mode=0): """Printing FEI4 raw data array for debugging. """
    if not select:
        select = ['DH', 'TW', "AR", "VR", "SR", "DR", 'TDC', 'UNKNOWN FE WORD', 'UNKNOWN WORD']
    total_words = 0
    for index in range(start_index, raw_data.shape[0]):
        dw = FEI4Record(raw_data[index], chip_flavor=flavor, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode)
        if dw in select:
            print index + index_offset, '{0:12d} {1:08b} {2:08b} {3:08b} {4:08b}'.format(raw_data[index], (raw_data[index] & 0xFF000000) >> 24, (raw_data[index] & 0x00FF0000) >> 16, (raw_data[index] & 0x0000FF00) >> 8, (raw_data[index] & 0x000000FF) >> 0), dw
            total_words += 1
            if limit and total_words >= limit:
                break
    return total_words
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def interval_timed(interval):
    '''Interval timer decorator.

    Taken from: http://stackoverflow.com/questions/12435211/python-threading-timer-repeat-function-every-n-seconds/12435256
    '''
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            stopped = Event()

            def loop():  # executed in another thread
                while not stopped.wait(interval):  # until stopped
                    f(*args, **kwargs)

            t = Thread(name='IntervalTimerThread', target=loop)
            t.daemon = True  # stop if the program exits
            t.start()
            return stopped.set
        return wrapper
    return decorator
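A minimal usage sketch of the decorator above, assuming `interval_timed` and its `Thread`/`Event`/`wraps` imports are in scope; `heartbeat` is an illustrative name, not part of the original module:

@interval_timed(1.0)
def heartbeat():
    print("alive")

stop = heartbeat()   # starts the background loop; returns the Event's set method
# ... do other work while heartbeat() fires every second ...
stop()               # sets the Event, ending the loop at the next wakeup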
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def interval_timer(interval, func, *args, **kwargs):
    '''Interval timer function.

    Taken from: http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708
    '''
    stopped = Event()

    def loop():
        while not stopped.wait(interval):  # the first call is after interval
            func(*args, **kwargs)

    Thread(name='IntervalTimerThread', target=loop).start()
    return stopped.set
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def send_mail(subject, body, smtp_server, user, password, from_addr, to_addrs):
    ''' Sends a run status mail with the traceback to a specified E-Mail address if a run crashes.
    '''
    logging.info('Send status E-Mail (' + subject + ')')
    content = string.join((
        "From: %s" % from_addr,
        "To: %s" % ','.join(to_addrs),  # comma separated according to RFC822
        "Subject: %s" % subject,
        "",
        body),
        "\r\n")
    server = smtplib.SMTP_SSL(smtp_server)
    server.login(user, password)
    server.sendmail(from_addr, to_addrs, content)
    server.quit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_module_cfgs(self):
    ''' Extracts the configuration of the modules.
    '''
    # Adding here default run config parameters.
    if "dut" not in self._conf or self._conf["dut"] is None:
        raise ValueError('Parameter "dut" not defined.')
    if "dut_configuration" not in self._conf or self._conf["dut_configuration"] is None:
        raise ValueError('Parameter "dut_configuration" not defined.')
    self._conf.setdefault('working_dir', None)  # string, if None, absolute path of configuration.yaml file will be used
    if 'modules' in self._conf and self._conf['modules']:
        for module_id, module_cfg in [(key, value) for key, value in self._conf['modules'].items() if ("activate" not in value or ("activate" in value and value["activate"] is True))]:
            # Check here for missing module config items.
            # Capital letter keys are Basil drivers, other keys are parameters.
            # FIFO, RX, TX, TLU and TDC are generic driver names which are used in the scan implementations.
            # The use of these reserved driver names allows for abstraction.
            # Accessing Basil drivers with real name is still possible.
            if "module_group" in module_id:
                raise ValueError('The module ID "%s" contains the reserved name "module_group".' % module_id)
            if "flavor" not in module_cfg or module_cfg["flavor"] is None:
                raise ValueError('No parameter "flavor" defined for module "%s".' % module_id)
            if module_cfg["flavor"] in fe_flavors:
                for driver_name in _reserved_driver_names:
                    # TDC is not mandatory
                    if driver_name == "TDC":
                        # TDC is allowed to be set to None
                        module_cfg.setdefault('TDC', None)
                        continue
                    if driver_name not in module_cfg or module_cfg[driver_name] is None:
                        raise ValueError('No parameter "%s" defined for module "%s".' % (driver_name, module_id))
            if "rx_channel" not in module_cfg or module_cfg["rx_channel"] is None:
                raise ValueError('No parameter "rx_channel" defined for module "%s".' % module_id)
            if "tx_channel" not in module_cfg or module_cfg["tx_channel"] is None:
                raise ValueError('No parameter "tx_channel" defined for module "%s".' % module_id)
            if "chip_address" not in module_cfg:
                raise ValueError('No parameter "chip_address" defined for module "%s".' % module_id)
            module_cfg.setdefault("tdc_channel", None)
            module_cfg.setdefault("configuration", None)  # string or number, if None, using the last valid configuration
            module_cfg.setdefault("send_data", None)  # address string of PUB socket
            module_cfg.setdefault("activate", True)  # set module active by default
            # Save config to dict.
            self._module_cfgs[module_id] = module_cfg
            self._modules[module_id] = [module_id]
    else:
        raise ValueError("No module configuration specified")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    '''Releasing hardware resources.
    '''
    try:
        self.dut.close()
    except Exception:
        logging.warning('Closing DUT was not successful')
    else:
        logging.debug('Closed DUT')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def handle_data(self, data, new_file=False, flush=True):
    '''Handling of the data.

    Parameters
    ----------
    data : list, tuple
        Data tuple of the format (data (np.array), last_time (float), curr_time (float), status (int))
    '''
    for i, module_id in enumerate(self._selected_modules):
        if data[i] is None:
            continue
        self._raw_data_files[module_id].append(data_iterable=data[i], scan_parameters=self._scan_parameters[module_id]._asdict(), new_file=new_file, flush=flush)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def handle_err(self, exc):
    '''Handling of Exceptions.

    Parameters
    ----------
    exc : list, tuple
        Information of the exception of the format (type, value, traceback).
        Uses the return value of sys.exc_info().
    '''
    if self.reset_rx_on_error and isinstance(exc[1], (RxSyncError, EightbTenbError)):
        self.fifo_readout.print_readout_status()
        self.fifo_readout.reset_rx()
    else:
        # print just the first error message
        if not self.abort_run.is_set():
            self.abort(msg=exc[1].__class__.__name__ + ": " + str(exc[1]))
    self.err_queue.put(exc)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_configuration(self, module_id, run_number=None):
    ''' Returns the configuration for a given module ID.

    The working directory is searched for a file matching the module_id with the given run number.
    If no run number is defined the last successful run defines the run number.
    '''
    def find_file(run_number):
        module_path = self.get_module_path(module_id)
        for root, _, files in os.walk(module_path):
            for cfgfile in files:
                cfg_root, cfg_ext = os.path.splitext(cfgfile)
                if cfg_root.startswith(''.join([str(run_number), '_', module_id])) and cfg_ext.endswith(".cfg"):
                    return os.path.join(root, cfgfile)

    if not run_number:
        run_numbers = sorted(self._get_run_numbers(status='FINISHED').keys(), reverse=True)
        found_fin_run_cfg = True
        if not run_numbers:
            return None
        last_fin_run = run_numbers[0]
        for run_number in run_numbers:
            cfg_file = find_file(run_number)
            if cfg_file:
                if not found_fin_run_cfg:
                    logging.warning("Module '%s' has no configuration for run %d, use config of run %d", module_id, last_fin_run, run_number)
                return cfg_file
            else:
                found_fin_run_cfg = False
    else:
        cfg_file = find_file(run_number)
        if cfg_file:
            return cfg_file
        else:
            raise ValueError('Found no configuration with run number %s' % run_number)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def deselect_module(self):
    ''' Deselect module and cleanup.
    '''
    self._enabled_fe_channels = []  # ignore any RX sync errors
    self._readout_fifos = []
    self._filter = []
    self._converter = []
    self.dut['TX']['OUTPUT_ENABLE'] = 0
    self._current_module_handle = None
    if isinstance(current_thread(), _MainThread):
        current_thread().name = "MainThread"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def readout(self, *args, **kwargs):
    ''' Running the FIFO readout while executing other statements.

    Starting and stopping of the FIFO readout is synchronized between the threads.
    '''
    timeout = kwargs.pop('timeout', 10.0)
    self.start_readout(*args, **kwargs)
    try:
        yield
    finally:
        try:
            self.stop_readout(timeout=timeout)
        except Exception:
            # in case something fails, call this on last resort
            # if run was aborted, immediately stop readout
            if self.abort_run.is_set():
                with self._readout_lock:
                    if self.fifo_readout.is_running:
                        self.fifo_readout.stop(timeout=0.0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def start_readout(self, *args, **kwargs):
    ''' Starting the FIFO readout.

    Starting of the FIFO readout is executed only once by a random thread.
    Starting of the FIFO readout is synchronized between all threads reading out the FIFO.
    '''
    # Pop parameters for fifo_readout.start
    callback = kwargs.pop('callback', self.handle_data)
    errback = kwargs.pop('errback', self.handle_err)
    reset_rx = kwargs.pop('reset_rx', True)
    reset_fifo = kwargs.pop('reset_fifo', True)
    fill_buffer = kwargs.pop('fill_buffer', False)
    no_data_timeout = kwargs.pop('no_data_timeout', None)
    enabled_fe_channels = kwargs.pop('enabled_fe_channels', self._enabled_fe_channels)
    if args or kwargs:
        self.set_scan_parameters(*args, **kwargs)
    if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]:
        raise RuntimeError('Thread name "%s" is not valid.' % self.current_module_handle)
    if self._scan_threads and self.current_module_handle in self._curr_readout_threads:
        raise RuntimeError('Thread "%s" is already actively reading FIFO.' % self.current_module_handle)
    with self._readout_lock:
        self._curr_readout_threads.append(self.current_module_handle)
        self._starting_readout_event.clear()
    while not self._starting_readout_event.wait(0.01):
        if self.abort_run.is_set():
            break
        with self._readout_lock:
            if len(set(self._curr_readout_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == len(set([t.name for t in self._scan_threads if t.is_alive()])) or not self._scan_threads:
                if not self.fifo_readout.is_running:
                    self.fifo_readout.start(fifos=self._selected_fifos, callback=callback, errback=errback, reset_rx=reset_rx, reset_fifo=reset_fifo, fill_buffer=fill_buffer, no_data_timeout=no_data_timeout, filter_func=self._filter, converter_func=self._converter, fifo_select=self._readout_fifos, enabled_fe_channels=enabled_fe_channels)
                self._starting_readout_event.set()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def stop_readout(self, timeout=10.0):
    ''' Stopping the FIFO readout.

    Stopping of the FIFO readout is executed only once by a random thread.
    Stopping of the FIFO readout is synchronized between all threads reading out the FIFO.
    '''
    if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]:
        raise RuntimeError('Thread name "%s" is not valid.' % self.current_module_handle)
    if self._scan_threads and self.current_module_handle not in self._curr_readout_threads:
        raise RuntimeError('Thread "%s" is not reading FIFO.' % self.current_module_handle)
    with self._readout_lock:
        self._curr_readout_threads.remove(self.current_module_handle)
        self._stopping_readout_event.clear()
    while not self._stopping_readout_event.wait(0.01):
        with self._readout_lock:
            if len(set(self._curr_readout_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == 0 or not self._scan_threads or self.abort_run.is_set():
                if self.fifo_readout.is_running:
                    self.fifo_readout.stop(timeout=timeout)
                self._stopping_readout_event.set()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):  # return the charge from calibration
    ''' Interpolates the TDC calibration for each pixel from 0 to max_tdc.'''
    charge_calibration = np.zeros(shape=(80, 336, max_tdc))
    for column in range(80):
        for row in range(336):
            actual_pixel_calibration = tdc_pixel_calibration[column, row, :]
            if np.any(actual_pixel_calibration != 0) and np.any(np.isfinite(actual_pixel_calibration)):
                selected_measurements = np.isfinite(actual_pixel_calibration)  # select valid calibration steps
                selected_actual_pixel_calibration = actual_pixel_calibration[selected_measurements]
                selected_tdc_calibration_values = tdc_calibration_values[selected_measurements]
                interpolation = interp1d(x=selected_actual_pixel_calibration, y=selected_tdc_calibration_values, kind='slinear', bounds_error=False, fill_value=0)
                charge_calibration[column, row, :] = interpolation(np.arange(max_tdc))
    return charge_calibration
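For intuition, here is a self-contained sketch of the same inversion on a single made-up pixel; the numbers are illustrative, not real FE-I4 calibration data. The calibration measures TDC per injected charge, and the interpolation inverts it onto a dense TDC axis:

import numpy as np
from scipy.interpolate import interp1d

tdc_per_charge = np.array([10.0, 25.0, 60.0])       # measured TDC value at each calibration step
charge_values = np.array([1000.0, 2500.0, 6000.0])  # injected charge per step
f = interp1d(x=tdc_per_charge, y=charge_values, kind='slinear', bounds_error=False, fill_value=0)
print(f(np.arange(0, 70, 10)))  # charge estimate for TDC = 0, 10, ..., 60; out-of-range values become 0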
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_charge_calibration(calibation_file, max_tdc):
    ''' Open the HitOr calibration file and return the calibration per pixel.'''
    with tb.open_file(calibation_file, mode="r") as in_file_calibration_h5:
        tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :, 1]
        tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
    return get_charge(max_tdc, tdc_calibration_values, tdc_calibration)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addEntry(self): """Add the `Plot pyBAR data` entry to the `Dataset` menu. """
    export_icon = QtGui.QIcon()
    pixmap = QtGui.QPixmap(os.path.join(PLUGINSDIR, 'csv/icons/document-export.png'))
    export_icon.addPixmap(pixmap, QtGui.QIcon.Normal, QtGui.QIcon.On)
    self.plot_action = QtGui.QAction(
        translate('PlotpyBARdata', "Plot data with pyBAR plugin", "Plot data with pyBAR plugin"),
        self,
        shortcut=QtGui.QKeySequence.UnknownKey,
        triggered=self.plot,
        icon=export_icon,
        statusTip=translate('PlotpyBARdata', "Plotting of selected data with pyBAR", "Status bar text for the Dataset -> Plot pyBAR data... action"))
    # Add the action to the Dataset menu
    menu = self.vtgui.dataset_menu
    menu.addSeparator()
    menu.addAction(self.plot_action)
    # Add the action to the leaf context menu
    cmenu = self.vtgui.leaf_node_cm
    cmenu.addSeparator()
    cmenu.addAction(self.plot_action)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def updateDatasetMenu(self): """Update the `export` QAction when the Dataset menu is pulled down. This method is a slot. See class ctor for details. """
    enabled = True
    current = self.vtgui.dbs_tree_view.currentIndex()
    if current:
        leaf = self.vtgui.dbs_tree_model.nodeFromIndex(current)
        if leaf.node_kind in (u'group', u'root group'):
            enabled = False
    self.plot_action.setEnabled(enabled)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(self): """Plot the selected dataset. This method is a slot connected to the `plot` QAction. See the :meth:`addEntry` method for details. """
    # The PyTables node tied to the current leaf of the databases tree
    current = self.vtgui.dbs_tree_view.currentIndex()
    leaf = self.vtgui.dbs_tree_model.nodeFromIndex(current).node
    data_name = leaf.name
    hists_1d = ['HistRelBcid', 'HistErrorCounter', 'HistTriggerErrorCounter', 'HistServiceRecord', 'HistTot', 'HistTdc', 'HistClusterTot', 'HistClusterSize']
    hists_2d = ['HistOcc', 'Enable', 'Imon', 'C_High', 'EnableDigInj', 'C_Low', 'FDAC', 'TDAC', 'HistTdcPixel', 'HistTotPixel', 'HistThreshold', 'HistNoise', 'HistThresholdFitted', 'HistNoiseFitted', 'HistThresholdFittedCalib', 'HistNoiseFittedCalib']
    if data_name in hists_1d:
        plot_1d_hist(hist=leaf[:], title=data_name)
    elif data_name in hists_2d:
        if data_name == 'HistOcc':
            leaf = np.sum(leaf[:], axis=2)
        plot_2d_hist(hist=leaf[:], title=data_name)
    elif 'Table' in str(type(leaf)) and len(leaf[:].dtype.names) <= 3:  # detect tables with less than 4 columns
        plot_table(leaf[:], title=data_name)
    elif data_name == 'HitOrCalibration':
        print 'Coming soon'
    else:
        print 'Plotting', data_name, '(%s) is not supported!' % type(leaf)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def send_meta_data(socket, conf, name):
    '''Sends the config via ZeroMQ to a specified socket.

    Is called at the beginning of a run and when the config changes. Conf can be any config dictionary.
    '''
    meta_data = dict(
        name=name,
        conf=conf
    )
    try:
        socket.send_json(meta_data, flags=zmq.NOBLOCK)
    except zmq.Again:
        pass
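A minimal sender-side sketch; the endpoint and config payload below are assumptions for illustration, not values from the original module:

import zmq

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://127.0.0.1:5678")  # hypothetical PUB endpoint

# Subscribers that are not yet connected simply miss this message;
# zmq.NOBLOCK in send_meta_data keeps the sender from stalling.
send_meta_data(socket, conf={"run_number": 42}, name="RunConf")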
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def save_raw_data_from_data_queue(data_queue, filename, mode='a', title='', scan_parameters=None):  # mode="r+" to append data, raw_data_file_h5 must exist, "w" to overwrite raw_data_file_h5, "a" to append data, if raw_data_file_h5 does not exist it is created
    '''Writing raw data file from data queue

    If you need to write raw data once in a while this function may make it easy for you.
    '''
    if not scan_parameters:
        scan_parameters = {}
    with open_raw_data_file(filename, mode=mode, title=title, scan_parameters=list(dict.iterkeys(scan_parameters))) as raw_data_file:
        raw_data_file.append(data_queue, scan_parameters=scan_parameters)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def worker(self, fifo):
    '''Worker thread continuously filtering and converting data when data becomes available.
    '''
    logging.debug('Starting worker thread for %s', fifo)
    self._fifo_conditions[fifo].acquire()
    while True:
        try:
            data_tuple = self._fifo_data_deque[fifo].popleft()
        except IndexError:
            self._fifo_conditions[fifo].wait(self.readout_interval)  # sleep a little bit, reducing CPU usage
        else:
            if data_tuple is None:  # if None then exit
                break
            else:
                for index, (filter_func, converter_func, fifo_select) in enumerate(izip(self.filter_func, self.converter_func, self.fifo_select)):
                    if fifo_select is None or fifo_select == fifo:
                        # filter and do the conversion
                        converted_data_tuple = convert_data_iterable((data_tuple,), filter_func=filter_func, converter_func=converter_func)[0]
                        n_data_words = converted_data_tuple[0].shape[0]
                        with self.data_words_per_second_lock:
                            self._words_per_read[index].append((n_data_words, converted_data_tuple[1], converted_data_tuple[2]))
                        self._data_deque[index].append(converted_data_tuple)
                        with self._data_conditions[index]:
                            self._data_conditions[index].notify_all()

    for index, fifo_select in enumerate(self.fifo_select):
        if fifo_select is None or fifo_select == fifo:
            self._data_deque[index].append(None)
            with self._data_conditions[index]:
                self._data_conditions[index].notify_all()
    self._fifo_conditions[fifo].release()
    logging.debug('Stopping worker thread for %s', fifo)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def writer(self, index, no_data_timeout=None):
    '''Writer thread continuously calling callback function for writing data when data becomes available.
    '''
    is_fe_data_header = logical_and(is_fe_word, is_data_header)
    logging.debug('Starting writer thread with index %d', index)
    self._data_conditions[index].acquire()
    time_last_data = time()
    time_write = time()
    converted_data_tuple_list = [None] * len(self.filter_func)  # callback function gets a list of lists of tuples
    while True:
        try:
            if no_data_timeout and time_last_data + no_data_timeout < time():
                raise NoDataTimeout('Received no data for %0.1f second(s) for writer thread with index %d' % (no_data_timeout, index))
            converted_data_tuple = self._data_deque[index].popleft()
        except NoDataTimeout:  # no data timeout
            no_data_timeout = None  # raise exception only once
            if self.errback:
                self.errback(sys.exc_info())
            else:
                raise
        except IndexError:  # no data in queue
            self._data_conditions[index].wait(self.readout_interval)  # sleep a little bit, reducing CPU usage
        else:
            if converted_data_tuple is None:  # if None then write and exit
                if self.callback and any(converted_data_tuple_list):
                    try:
                        self.callback(converted_data_tuple_list)
                    except Exception:
                        self.errback(sys.exc_info())
                break
            else:
                if no_data_timeout and np.any(is_fe_data_header(converted_data_tuple[0])):  # check for FEI4 data words
                    time_last_data = time()
                if converted_data_tuple_list[index]:
                    converted_data_tuple_list[index].append(converted_data_tuple)
                else:
                    converted_data_tuple_list[index] = [converted_data_tuple]  # adding iterable
                if self.fill_buffer:
                    self._data_buffer[index].append(converted_data_tuple)
            # check if calling the callback function is about time
            if self.callback and any(converted_data_tuple_list) and ((self.write_interval and time() - time_write >= self.write_interval) or not self.write_interval):
                try:
                    self.callback(converted_data_tuple_list)  # callback function gets a list of lists of tuples
                except Exception:
                    self.errback(sys.exc_info())
                else:
                    converted_data_tuple_list = [None] * len(self.filter_func)
                time_write = time()  # update last write timestamp
    self._data_conditions[index].release()
    logging.debug('Stopping writer thread with index %d', index)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iterable(item): """Generate an iterable from item, but leave strings intact (do not iterate over their characters). """
    if isinstance(item, collections.Iterable) and not isinstance(item, basestring):
        return item
    else:
        return [item]
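A quick illustration of the string special case, runnable next to the function above:

print(iterable([1, 2, 3]))  # [1, 2, 3] -- already iterable, returned unchanged
print(iterable("abc"))      # ['abc']   -- a string is wrapped, not split into characters
print(iterable(5))          # [5]       -- scalars are wrapped in a list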
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def natsorted(seq, cmp=natcmp):
    "Returns a copy of seq, sorted by natural string sort."
    import copy
    temp = copy.copy(seq)
    natsort(temp, cmp)
    return temp
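Expected behavior sketch, assuming `natcmp`/`natsort` from the same module: embedded numbers compare numerically, so 'run10' sorts after 'run2':

files = ['run10.txt', 'run2.txt', 'run1.txt']
print(natsorted(files))  # ['run1.txt', 'run2.txt', 'run10.txt']
print(sorted(files))     # ['run1.txt', 'run10.txt', 'run2.txt'] -- plain lexicographic order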
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_float_time():
    '''Returns time as double precision float - Time64 in pytables - mapping to and from python datetime's.
    '''
    t1 = time.time()
    t2 = datetime.datetime.fromtimestamp(t1)
    return time.mktime(t2.timetuple()) + 1e-6 * t2.microsecond
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def groupby_dict(dictionary, key):
    ''' Group dict of dicts by key.
    '''
    return dict((k, list(g)) for k, g in itertools.groupby(sorted(dictionary.keys(), key=lambda name: dictionary[name][key]), key=lambda name: dictionary[name][key]))
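For example, with a hypothetical module dict in the spirit of the run-configuration code earlier in this set (names and flavors are made up):

modules = {
    'module_0': {'flavor': 'fei4a'},
    'module_1': {'flavor': 'fei4b'},
    'module_2': {'flavor': 'fei4a'},
}
print(groupby_dict(modules, 'flavor'))
# {'fei4a': ['module_0', 'module_2'], 'fei4b': ['module_1']}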
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def find_file_dir_up(filename, path=None, n=None):
    '''Finding file in directory upwards.
    '''
    if path is None:
        path = os.getcwd()
    i = 0
    while True:
        current_path = path
        for _ in range(i):
            current_path = os.path.split(current_path)[0]
        if os.path.isfile(os.path.join(current_path, filename)):  # found file and return
            return os.path.join(current_path, filename)
        elif os.path.dirname(current_path) == current_path:  # root of filesystem
            return
        elif n is not None and i == n:
            return
        else:  # file not found
            i += 1
            continue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def set_standard_settings(self):
    '''Set all settings to their standard values.
    '''
    if self.is_open(self.out_file_h5):
        self.out_file_h5.close()
    self.out_file_h5 = None
    self._setup_clusterizer()
    self.chunk_size = 3000000
    self.n_injections = None
    self.trig_count = 0  # 0 trig_count = 16 BCID per trigger
    self.max_tot_value = 13
    self.vcal_c0, self.vcal_c1 = None, None
    self.c_low, self.c_mid, self.c_high = None, None, None
    self.c_low_mask, self.c_high_mask = None, None
    self._filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
    warnings.simplefilter("ignore", OptimizeWarning)
    self.meta_event_index = None
    self.fei4b = False
    self.create_hit_table = False
    self.create_empty_event_hits = False
    self.create_meta_event_index = True
    self.create_tot_hist = True
    self.create_mean_tot_hist = False
    self.create_tot_pixel_hist = True
    self.create_rel_bcid_hist = True
    self.correct_corrupted_data = False
    self.create_error_hist = True
    self.create_service_record_hist = True
    self.create_occupancy_hist = True
    self.create_meta_word_index = False
    self.create_source_scan_hist = False
    self.create_tdc_hist = False
    self.create_tdc_counter_hist = False
    self.create_tdc_pixel_hist = False
    self.create_trigger_error_hist = False
    self.create_threshold_hists = False
    self.create_threshold_mask = True  # Threshold/noise histogram mask: masking all pixels out of bounds
    self.create_fitted_threshold_mask = True  # Fitted threshold/noise histogram mask: masking all pixels out of bounds
    self.create_fitted_threshold_hists = False
    self.create_cluster_hit_table = False
    self.create_cluster_table = False
    self.create_cluster_size_hist = False
    self.create_cluster_tot_hist = False
    self.align_at_trigger = False  # use the trigger word to align the events
    self.align_at_tdc = False  # use the TDC word to align the events
    self.trigger_data_format = 0  # 0: 31bit trigger number, 1: 31bit trigger time stamp, 2: 15bit trigger time stamp + 16bit trigger number
    self.use_tdc_trigger_time_stamp = False  # the tdc time stamp is the difference between trigger and tdc rising edge
    self.max_tdc_delay = 255
    self.max_trigger_number = 2 ** 16 - 1
    self.set_stop_mode = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def max_tot_value(self, value): """Set maximum ToT value that is considered to be a hit"""
    self._max_tot_value = value
    self.interpreter.set_max_tot(self._max_tot_value)
    self.histogram.set_max_tot(self._max_tot_value)
    self.clusterizer.set_max_hit_charge(self._max_tot_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_fields(self, result, field_name): """ If the schema is accessible, parse its fields and build the respective lists """
    field_list = []
    for key, value in result.get('schema', {}).get(field_name, {}).items():
        if key not in field_list:
            field_list.append(key)
    return field_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_fields(self): """ Builds a list of valid fields """
    declared_fields = self.solr._send_request('get', ADMIN_URL)
    result = decoder.decode(declared_fields)
    self.field_list = self._parse_fields(result, 'fields')

    # Build regular expressions to match dynamic fields.
    # dynamic field names may have exactly one wildcard, either at
    # the beginning or the end of the name
    self._dynamic_field_regexes = []
    for wc_pattern in self._parse_fields(result, 'dynamicFields'):
        if wc_pattern[0] == "*":
            self._dynamic_field_regexes.append(
                re.compile(r".*%s\Z" % wc_pattern[1:]))
        elif wc_pattern[-1] == "*":
            self._dynamic_field_regexes.append(
                re.compile(r"\A%s.*" % wc_pattern[:-1]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _clean_doc(self, doc, namespace, timestamp): """Reformats the given document before insertion into Solr. This method reformats the document in the following ways: - removes extraneous fields that aren't defined in schema.xml - unwinds arrays in order to find and later flatten sub-documents - flattens the document so that there are no sub-documents, and every value is associated with its dot-separated path of keys - inserts namespace and timestamp metadata into the document in order to handle rollbacks An example: {"a": 2, "b": { "c": { "d": 5 } }, "e": [6, 7, 8] } becomes: {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8} """
    # Translate the _id field to whatever unique key we're using.
    # _id may not exist in the doc, if we retrieved it from Solr
    # as part of update.
    if '_id' in doc:
        doc[self.unique_key] = u(doc.pop("_id"))

    # Update namespace and timestamp metadata
    if 'ns' in doc or '_ts' in doc:
        raise errors.OperationFailed(
            'Need to set "ns" and "_ts" fields, but these fields already '
            'exist in the document %r!' % doc)
    doc['ns'] = namespace
    doc['_ts'] = timestamp

    # SOLR cannot index fields within sub-documents, so flatten documents
    # with the dot-separated path to each value as the respective key
    flat_doc = self._formatter.format_document(doc)

    # Only include fields that are explicitly provided in the
    # schema or match one of the dynamic field patterns, if
    # we were able to retrieve the schema
    if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
        def include_field(field):
            return field in self.field_list or any(
                regex.match(field) for regex in self._dynamic_field_regexes
            )
        return dict((k, v) for k, v in flat_doc.items() if include_field(k))
    return flat_doc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_update(self, doc, update_spec): """Override DocManagerBase.apply_update to have flat documents."""
    # Replace a whole document
    if not '$set' in update_spec and not '$unset' in update_spec:
        # update_spec contains the new document.
        # Update the key in Solr based on the unique_key mentioned as parameter.
        update_spec['_id'] = doc[self.unique_key]
        return update_spec
    for to_set in update_spec.get("$set", []):
        value = update_spec['$set'][to_set]
        # Find dotted-path to the value, remove that key from doc, then put value at key:
        keys_to_pop = []
        for key in doc:
            if key.startswith(to_set):
                if key == to_set or key[len(to_set)] == '.':
                    keys_to_pop.append(key)
        for key in keys_to_pop:
            doc.pop(key)
        doc[to_set] = value
    for to_unset in update_spec.get("$unset", []):
        # MongoDB < 2.5.2 reports $unset for fields that don't exist within
        # the document being updated.
        keys_to_pop = []
        for key in doc:
            if key.startswith(to_unset):
                if key == to_unset or key[len(to_unset)] == '.':
                    keys_to_pop.append(key)
        for key in keys_to_pop:
            doc.pop(key)
    return doc
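A worked example of the flat-document semantics (the values are made up; since apply_update is a method, this is shown as a data-only sketch):

# Starting from a document that _clean_doc has already flattened:
doc = {'id': 1, 'a': 2, 'b.c.d': 5}
# Applying update_spec = {'$set': {'b': 7}} removes the stale flattened
# sub-key 'b.c.d' (it starts with 'b' followed by '.') and sets 'b' directly:
# -> {'id': 1, 'a': 2, 'b': 7}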
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upsert(self, doc, namespace, timestamp): """Update or insert a document into Solr This method should call whatever add/insert/update method exists for the backend engine and add the document in there. The input will always be one mongo document, represented as a Python dictionary. """
    if self.auto_commit_interval is not None:
        self.solr.add([self._clean_doc(doc, namespace, timestamp)],
                      commit=(self.auto_commit_interval == 0),
                      commitWithin=u(self.auto_commit_interval))
    else:
        self.solr.add([self._clean_doc(doc, namespace, timestamp)],
                      commit=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bulk_upsert(self, docs, namespace, timestamp): """Update or insert multiple documents into Solr docs may be any iterable """
    if self.auto_commit_interval is not None:
        add_kwargs = {
            "commit": (self.auto_commit_interval == 0),
            "commitWithin": str(self.auto_commit_interval)
        }
    else:
        add_kwargs = {"commit": False}

    cleaned = (self._clean_doc(d, namespace, timestamp) for d in docs)
    if self.chunk_size > 0:
        batch = list(next(cleaned) for i in range(self.chunk_size))
        while batch:
            self.solr.add(batch, **add_kwargs)
            batch = list(next(cleaned) for i in range(self.chunk_size))
    else:
        self.solr.add(cleaned, **add_kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove(self, document_id, namespace, timestamp): """Removes a document from Solr The input is the id of a mongo document. """
    self.solr.delete(id=u(document_id),
                     commit=(self.auto_commit_interval == 0))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _stream_search(self, query): """Helper method for iterating over Solr search results."""
    for doc in self.solr.search(query, rows=100000000):
        if self.unique_key != "_id":
            doc["_id"] = doc.pop(self.unique_key)
        yield doc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def search(self, start_ts, end_ts): """Called to query Solr for documents in a time range."""
    query = '_ts: [%s TO %s]' % (start_ts, end_ts)
    return self._stream_search(query)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_last_doc(self): """Returns the last document stored in the Solr engine. """
    # search everything, sort by descending timestamp, return 1 row
    try:
        result = self.solr.search('*:*', sort='_ts desc', rows=1)
    except ValueError:
        return None

    for r in result:
        r['_id'] = r.pop(self.unique_key)
        return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def blockmix_salsa8(BY, Yi, r):
    '''Blockmix; used by SMix.'''

    start = (2 * r - 1) * 16
    X = BY[start:start + 16]                             # BlockMix - 1

    for i in xrange(0, 2 * r):                           # BlockMix - 2
        for xi in xrange(0, 16):                         # BlockMix - 3(inner)
            X[xi] ^= BY[i * 16 + xi]

        salsa20_8(X)                                     # BlockMix - 3(outer)
        aod = Yi + i * 16                                # BlockMix - 4
        BY[aod:aod + 16] = X[:16]

    for i in xrange(0, r):                               # BlockMix - 6 (and below)
        aos = Yi + i * 32
        aod = i * 16
        BY[aod:aod + 16] = BY[aos:aos + 16]

    for i in xrange(0, r):
        aos = Yi + (i * 2 + 1) * 16
        aod = (i + r) * 16
        BY[aod:aod + 16] = BY[aos:aos + 16]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def smix(B, Bi, r, N, V, X):
    '''SMix; a specific case of ROMix. See scrypt.pdf in the links above.'''

    X[:32 * r] = B[Bi:Bi + 32 * r]               # ROMix - 1

    for i in xrange(0, N):                       # ROMix - 2
        aod = i * 32 * r                         # ROMix - 3
        V[aod:aod + 32 * r] = X[:32 * r]
        blockmix_salsa8(X, 32 * r, r)            # ROMix - 4

    for i in xrange(0, N):                       # ROMix - 6
        j = X[(2 * r - 1) * 16] & (N - 1)        # ROMix - 7
        for xi in xrange(0, 32 * r):             # ROMix - 8(inner)
            X[xi] ^= V[j * 32 * r + xi]

        blockmix_salsa8(X, 32 * r, r)            # ROMix - 9(outer)

    B[Bi:Bi + 32 * r] = X[:32 * r]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hash(password, salt, N, r, p, dkLen): """Returns the result of the scrypt password-based key derivation function. Constraints: r * p < (2 ** 30) dkLen <= ((2 ** 32) - 1) * 32 N, r, p must be positive """
    # This only matters to Python 3
    if not check_bytes(password):
        raise ValueError('password must be a byte array')
    if not check_bytes(salt):
        raise ValueError('salt must be a byte array')

    # Scrypt implementation. Significant thanks to https://github.com/wg/scrypt
    if N < 2 or (N & (N - 1)):
        raise ValueError('Scrypt N must be a power of 2 greater than 1')

    # A pseudorandom function
    prf = lambda k, m: hmac.new(key=k, msg=m, digestmod=hashlib.sha256).digest()

    # convert into integers
    B = [get_byte(c) for c in pbkdf2_single(password, salt, p * 128 * r, prf)]
    B = [((B[i + 3] << 24) | (B[i + 2] << 16) | (B[i + 1] << 8) | B[i + 0]) for i in xrange(0, len(B), 4)]

    XY = [0] * (64 * r)
    V = [0] * (32 * r * N)

    for i in xrange(0, p):
        smix(B, i * 32 * r, r, N, V, XY)

    # Convert back into bytes
    Bc = []
    for i in B:
        Bc.append((i >> 0) & 0xff)
        Bc.append((i >> 8) & 0xff)
        Bc.append((i >> 16) & 0xff)
        Bc.append((i >> 24) & 0xff)

    return pbkdf2_single(password, chars_to_bytes(Bc), dkLen, prf)
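A hypothetical invocation; the parameter values below are common illustrative defaults, not a recommendation from this module:

key = hash(password=b'correct horse', salt=b'seasalt', N=16384, r=8, p=1, dkLen=64)
print(len(key))  # 64 -- the derived key length requested via dkLen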
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _load_get_attr(self, name):
    'Return an internal attribute after ensuring the header is loaded if necessary.'
    if self._mode in _allowed_read and self._N is None:
        self._read_header()
    return getattr(self, name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    '''Close the underlying file. Sets data attribute .closed to True. A closed file
    cannot be used for further I/O operations. close() may be called more than once
    without error. Some kinds of file objects (for example, opened by popen()) may
    return an exit status upon closing.'''

    if self._mode in _allowed_write and self._valid is None:
        self._finalize_write()
    result = self._fp.close()
    self._closed = True
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def verify_file(fp, password):
    'Returns whether a scrypt encrypted file is valid.'
    sf = ScryptFile(fp=fp, password=password)
    for line in sf:
        pass
    sf.close()
    return sf.valid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def readline(self, size=None):
    '''Next line from the decrypted file, as a bytes string. Retain newline.
    A non-negative size argument limits the maximum number of bytes to return
    (an incomplete line may be returned then). Return an empty string at EOF.'''

    if self.closed:
        raise ValueError('file closed')
    if self._mode in _allowed_write:
        raise Exception('file opened for write only')
    if self._read_finished:
        return b''

    line = b''
    while not line.endswith(b'\n') and not self._read_finished and (size is None or len(line) <= size):
        line += self.read(1)
    return line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _read_header(self):
    '''Read and parse the header and calculate derived keys.'''

    try:
        # Read the entire header
        header = self._fp.read(96)
        if len(header) != 96:
            raise InvalidScryptFileFormat("Incomplete header")

        # Magic number
        if header[0:6] != b'scrypt':
            raise InvalidScryptFileFormat('Invalid magic number')

        # Version (we only support 0)
        version = get_byte(header[6])
        if version != 0:
            raise InvalidScryptFileFormat('Unsupported version (%d)' % version)

        # Scrypt parameters
        self._N = 1 << get_byte(header[7])
        (self._r, self._p) = struct.unpack('>II', header[8:16])
        self._salt = header[16:48]

        # Generate the key
        self._key = hash(self._password, self._salt, self._N, self._r, self._p, 64)

        # Header checksum
        checksum = header[48:64]
        calculated_checksum = hashlib.sha256(header[0:48]).digest()[:16]
        if checksum != calculated_checksum:
            raise InvalidScryptFileFormat('Incorrect header checksum')

        # Stream checksum
        checksum = header[64:96]
        self._checksumer = hmac.new(self.key[32:], msg=header[0:64], digestmod=hashlib.sha256)
        if checksum != self._checksumer.digest():
            raise InvalidScryptFileFormat('Incorrect header stream checksum')
        self._checksumer.update(header[64:96])

        # Prepare the AES engine
        self._crypto = aesctr.AESCounterModeOfOperation(key=self.key[:32])

        self._done_header = True

    except InvalidScryptFileFormat as e:
        self.close()
        raise e

    except Exception as e:
        self.close()
        raise InvalidScryptFileFormat('Header error (%s)' % e)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read(self, size=None):
    '''Read at most size bytes, returned as a string.

    If the size argument is negative or omitted, read until EOF is reached.
    Notice that when in non-blocking mode, less data than what was requested
    may be returned, even if no size parameter was given.'''

    if self.closed:
        raise ValueError('File closed')
    if self._mode in _allowed_write:
        raise Exception('File opened for write only')
    if not self._done_header:
        self._read_header()

    # The encrypted file has been entirely read, so return as much as they want
    # and remove the returned portion from the decrypted buffer
    if self._read_finished:
        if size is None:
            decrypted = self._decrypted_buffer
        else:
            decrypted = self._decrypted_buffer[:size]
        self._decrypted_buffer = self._decrypted_buffer[len(decrypted):]
        return decrypted

    # Read everything in one chunk
    if size is None or size < 0:
        self._encrypted_buffer = self._fp.read()
        self._read_finished = True
    else:
        # We fill the encrypted buffer (keeping it with a minimum of 32 bytes in case of the
        # end-of-file checksum) and decrypt into a decrypted buffer 1 block at a time
        while not self._read_finished:
            # We have enough decrypted bytes (or will after decrypting the encrypted buffer)
            available = len(self._decrypted_buffer) + len(self._encrypted_buffer) - 32
            if available >= size:
                break

            # Read a little extra for the possible final checksum
            data = self._fp.read(BLOCK_SIZE)

            # No data left; we're done
            if not data:
                self._read_finished = True
                break

            self._encrypted_buffer += data

    # Decrypt as much of the encrypted data as possible (leaving the final checksum)
    safe = self._encrypted_buffer[:-32]
    self._encrypted_buffer = self._encrypted_buffer[-32:]
    self._decrypted_buffer += self._crypto.decrypt(safe)
    self._checksumer.update(safe)

    # We read all the bytes, only the checksum remains
    if self._read_finished:
        self._check_final_checksum(self._encrypted_buffer)

    # Send back the number of bytes requested and remove them from the buffer
    decrypted = self._decrypted_buffer[:size]
    self._decrypted_buffer = self._decrypted_buffer[size:]
    return decrypted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _write_header(self):
    'Writes the header to the underlying file object.'

    header = b'scrypt' + CHR0 + struct.pack('>BII', int(math.log(self.N, 2)), self.r, self.p) + self.salt

    # Add the header checksum to the header
    checksum = hashlib.sha256(header).digest()[:16]
    header += checksum

    # Add the header stream checksum
    self._checksumer = hmac.new(self.key[32:], msg=header, digestmod=hashlib.sha256)
    checksum = self._checksumer.digest()
    header += checksum
    self._checksumer.update(checksum)

    # Write the header
    self._fp.write(header)

    # Prepare the AES engine
    self._crypto = aesctr.AESCounterModeOfOperation(key=self.key[:32])
    # self._crypto = aes(self.key[:32])

    self._done_header = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _finalize_write(self):
    'Finishes any unencrypted bytes and writes the final checksum.'

    # Make sure we have written the header
    if not self._done_header:
        self._write_header()

    # Write the remaining decrypted part to disk
    block = self._crypto.encrypt(self._decrypted_buffer)
    self._decrypted_buffer = b''
    self._fp.write(block)
    self._checksumer.update(block)

    # Write the final checksum
    self._fp.write(self._checksumer.digest())

    self._valid = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write(self, str):
    '''Write string str to the underlying file. Note that due to buffering,
    flush() or close() may be needed before the file on disk reflects the
    data written.'''

    if self.closed:
        raise ValueError('File closed')
    if self._mode in _allowed_read:
        raise Exception('File opened for read only')
    if self._valid is not None:
        raise Exception('file already finalized')

    if not self._done_header:
        self._write_header()

    # Encrypt and write the data
    encrypted = self._crypto.encrypt(str)
    self._checksumer.update(encrypted)
    self._fp.write(encrypted)
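A write-side round trip might look like the sketch below; note that the ScryptFile constructor arguments beyond fp and password are assumptions inferred from the methods above (N, r, p feed _write_header), not a documented signature:

with open('secret.enc', 'wb') as fp:
    sf = ScryptFile(fp=fp, password=b'hunter2', N=16384, r=8, p=1)  # assumed signature
    sf.write(b'hello world\n')
    sf.close()  # runs _finalize_write() and appends the final stream checksum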
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def removeFile(file): """remove a file"""
if "y" in speech.question("Are you sure you want to remove " + file + "? (Y/N): "): speech.speak("Removing " + file + " with the 'rm' command.") subprocess.call(["rm", "-r", file]) else: speech.speak("Okay, I won't remove " + file + ".")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(location): """copy file or directory at a given location; can be pasted later"""
    copyData = settings.getDataFile()
    copyFileLocation = os.path.abspath(location)
    copy = {"copyLocation": copyFileLocation}
    # use a context manager so the data file is closed after pickling
    with open(copyData, "wb") as dataFile:
        pickle.dump(copy, dataFile)
    speech.speak(location + " copied successfully!")
    speech.speak("Tip: use 'hallie paste' to paste this file.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def paste(location): """paste a file or directory that has been previously copied"""
    copyData = settings.getDataFile()
    if not location:
        location = "."
    try:
        data = pickle.load(open(copyData, "rb"))
        speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
    except:
        speech.fail("It doesn't look like you've copied anything yet.")
        speech.fail("Type 'hallie copy <file>' to copy a file or folder.")
        return
    process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
    if "denied" in process:
        speech.fail("Unable to paste your file successfully. This is most likely due to a permission issue. You can try to run me as sudo!")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_zfs_apt_repository(): """ adds the ZFS repository """
    with settings(hide('warnings', 'running', 'stdout'), warn_only=False, capture=True):
        sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update')
        install_ubuntu_development_tools()
        apt_install(packages=['software-properties-common', 'dkms', 'linux-headers-generic', 'build-essential'])
        sudo('echo | add-apt-repository ppa:zfs-native/stable')
        sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update')
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apt_install(**kwargs): """ installs a apt package """
    for pkg in list(kwargs['packages']):
        if is_package_installed(distribution='ubuntu', pkg=pkg) is False:
            sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get install -y %s" % pkg)
    # if we didn't abort above, we should return True
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apt_add_key(keyid, keyserver='keyserver.ubuntu.com', log=False): """ trust a new PGP key related to a apt-repository """
    if log:
        log_green('trusting keyid %s from %s' % (keyid, keyserver))
    with settings(hide('warnings', 'running', 'stdout')):
        sudo('apt-key adv --keyserver %s --recv %s' % (keyserver, keyid))
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enable_apt_repositories(prefix, url, version, repositories): """ adds an apt repository """
    with settings(hide('warnings', 'running', 'stdout'), warn_only=False, capture=True):
        sudo('apt-add-repository "%s %s %s %s"' % (prefix, url, version, repositories))
        with hide('running', 'stdout'):
            output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update")
        if 'Some index files failed to download' in output:
            raise SystemExit(1)
        else:
            # if we didn't abort above, we should return True
            return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_package_installed(distribution, pkg): """ checks if a particular package is installed """
    if ('centos' in distribution or
            'el' in distribution or
            'redhat' in distribution):
        return is_rpm_package_installed(pkg)

    if ('ubuntu' in distribution or
            'debian' in distribution):
        return is_deb_package_installed(pkg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_rpm_package_installed(pkg): """ checks if a particular rpm package is installed """
with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True, capture=True): result = sudo("rpm -q %s" % pkg) if result.return_code == 0: return True elif result.return_code == 1: return False else: # print error to user print(result) raise SystemExit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def yum_install(**kwargs): """ installs a yum package """
if 'repo' in kwargs: repo = kwargs['repo'] for pkg in list(kwargs['packages']): if is_package_installed(distribution='el', pkg=pkg) is False: if 'repo' in locals(): log_green( "installing %s from repo %s ..." % (pkg, repo)) sudo("yum install -y --quiet --enablerepo=%s %s" % (repo, pkg)) else: log_green("installing %s ..." % pkg) sudo("yum install -y --quiet %s" % pkg)
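A short usage sketch (illustrative package and repo names; assumes the named repo is configured on the host):

# install from the default repos
yum_install(packages=['vim-enhanced'])
# or restrict the install to a single enabled repo
yum_install(packages=['htop'], repo='epel')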
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yum_group_install(**kwargs):
    """ installs a yum group """
for grp in list(kwargs['groups']): log_green("installing %s ..." % grp) if 'repo' in kwargs: repo = kwargs['repo'] sudo("yum groupinstall -y --quiet " "--enablerepo=%s '%s'" % (repo, grp)) else: sudo("yum groups mark install -y --quiet '%s'" % grp) sudo("yum groups mark convert -y --quiet '%s'" % grp) sudo("yum groupinstall -y --quiet '%s'" % grp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recherche(self, pattern, entete, in_all=False):
    """Search `pattern` in the fields `entete` of the collection and reset rendering.
    Returns the number of results. If in_all is True, call get_all before doing the search."""
if in_all: self.collection = self.get_all() self.collection.recherche(pattern, entete) self._reset_render() return len(self.collection)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def launch_background_job(self, job, on_error=None, on_success=None):
    """Launch the callable job in a background thread.
    Success and failure are handled by on_success and on_error """
if not self.main.mode_online: self.sortie_erreur_GUI( "Local mode activated. Can't run background task !") self.reset() return on_error = on_error or self.sortie_erreur_GUI on_success = on_success or self.sortie_standard_GUI def thread_end(r): on_success(r) self.update() def thread_error(r): on_error(r) self.reset() logging.info( f"Launching background task from interface {self.__class__.__name__} ...") th = threads.worker(job, thread_error, thread_end) self._add_thread(th)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filtre(liste_base, criteres) -> groups.Collection:
    """ Return a filtered list, based on criteres

    :param liste_base: Access list
    """
def choisi(ac): for cat, li in criteres.items(): v = ac[cat] if not (v in li): return False return True return groups.Collection(a for a in liste_base if choisi(a))
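A minimal sketch of how filtre might be called, assuming groups.Collection accepts any iterable of mapping-like records (the field names here are hypothetical):

acces = [{"annee": 2023, "type": "A"}, {"annee": 2022, "type": "B"}]
criteres = {"annee": [2023], "type": ["A", "B"]}
resultat = filtre(acces, criteres)  # keeps only the first record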
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_users(self):
    """Default implementation retrieves users from the DB.
    Should set up the `users` attribute"""
r = sql.abstractRequetesSQL.get_users()() self.users = {d["id"]: dict(d) for d in r}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_modules(self):
    """Should instantiate interfaces and register them in `interfaces`, following `modules`"""
if self.INTERFACES_MODULE is None: raise NotImplementedError("A module containing interfaces modules " "should be setup in INTERFACES_MODULE !") else: for module, permission in self.modules.items(): i = getattr(self.INTERFACES_MODULE, module).Interface(self, permission) self.interfaces[module] = i
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_autolog(self, user_id):
    """ Read auto-connection parameters and return the local password, or None """
try: with open("local/init", "rb") as f: s = f.read() s = security.protege_data(s, False) self.autolog = json.loads(s).get("autolog", {}) except FileNotFoundError: return mdp = self.autolog.get(user_id, None) return mdp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loggin(self, user_id, mdp, autolog):
    """Check mdp and return True if it's ok"""
r = sql.abstractRequetesSQL.check_mdp_user(user_id, mdp)
if r():
    # update auto-log params
    self.autolog[user_id] = autolog and mdp or False
    self.modules = self.users[user_id]["modules"]  # load modules list
    dic = {"autolog": self.autolog, "modules": self.modules}
    s = json.dumps(dic, indent=4, ensure_ascii=False)
    b = security.protege_data(s, True)
    with open("local/init", "wb") as f:
        f.write(b)
    self.mode_online = True  # authorization to execute background tasks
    return True
else:
    logging.debug("Bad password !")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def mkpad(items):
    '''
    Find the length of the longest element of a list.
    Return that value + two.
    '''
    # cast each element to a string and take the longest length, plus padding
    return max((len(str(e)) for e in items), default=0) + 2
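A quick worked example of the padding computation, using mkpad as defined above:

assert mkpad(["a", "bb", "cccc"]) == 6  # longest element is 4 chars, plus 2
assert mkpad([1, 22, 333]) == 5         # ints are cast to strings first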
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def mkcols(l, rows):
    '''
    Compute the size of our columns by first padding the list out to
    a multiple of our row height and then splitting it into smaller
    lists the size of the row height.
    '''
    cols = []
    base = 0
    # pad with empty strings until the list divides evenly into rows
    while len(l) > rows and len(l) % rows != 0:
        l.append("")
    for i in range(rows, len(l) + rows, rows):
        cols.append(l[base:i])
        base = i
    return cols
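A worked example: seven items split into columns of height three, padded with empty strings so the list divides evenly:

cols = mkcols(list("abcdefg"), 3)
# cols == [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', '', '']]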
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def mkrows(l, pad, width, height):
    '''
    Compute the optimal number of rows based on our list's largest
    element and our terminal size in columns and rows.

    Work out our maximum column number by dividing the width of the
    terminal by our largest element. While the length of our list is
    greater than the total number of elements we can fit on the
    screen, increment the height by one.
    '''
    maxcols = int(width/pad)
    while len(l) > height * maxcols:
        height += 1
    return height
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def prtcols(items, vpad=6): ''' After computing the size of our rows and columns based on the terminal size and length of the largest element, use zip to aggregate our column lists into row lists and then iterate over the row lists and print them. ''' from os import get_terminal_size items = list(items) # copy list so we don't mutate it width, height = get_terminal_size() height -= vpad # customize vertical padding pad = mkpad(items) rows = mkrows(items, pad, width, height) cols = mkcols(items, rows) # * operator in conjunction with zip, unzips the list for c in zip(*cols): row_format = '{:<{pad}}' * len(cols) print(row_format.format(*c, pad=pad))
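Putting the helpers together; the output depends on the terminal size, so this is only a sketch:

# prints one hundred numbered items in as many columns as the width allows
prtcols([str(i) for i in range(100)], vpad=4)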
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def return_timer(self, name, status, timer):
    ''' Return a text formatted timer '''
    timer_template = '%s %s %s : %s : %9s'
    t = str(timedelta(0, timer)).split(',')[-1].strip().split(':')
    if len(t) == 4:
        h, m, s = int(t[0])*24 + int(t[1]), int(t[2]), float(t[3])
    elif len(t) == 3:
        h, m, s = int(t[0]), int(t[1]), float(t[2])
    else:
        h, m, s = 0, 0, str(t)
    return timer_template%(
        name[:20].ljust(20),
        status[:7].ljust(7),
        '%3d'%h if h != 0 else ' --',
        '%2d'%m if m != 0 else '--',
        '%.6f'%s if isinstance(s, float) else s
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def print_timers(self): ''' PRINT EXECUTION TIMES FOR THE LIST OF PROGRAMS ''' self.timer += time() total_time = self.timer tmp = '* %s *' debug.log( '', '* '*29, tmp%(' '*51), tmp%('%s %s %s'%('Program Name'.ljust(20), 'Status'.ljust(7), 'Execute Time (H:M:S)')), tmp%('='*51) ) for name in self.list: if self.exists(name): timer = getattr(self, name).get_time() status = getattr(self, name).get_status() self.timer -= timer debug.log(tmp%(self.return_timer(name, status, timer))) else: debug.log(tmp%("%s %s -- : -- : --"%(name[:20].ljust(20),' '*8))) debug.log( tmp%(self.return_timer('Wrapper', '', self.timer)), tmp%('='*51), tmp%(self.return_timer('Total', '', total_time)), tmp%(' '*51), '* '*29, '' )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cmd(self):
    """ This function combines and returns the command line call of the program. """
cmd = [] if self.path is not None: if '/' in self.path and not os.path.exists(self.path): debug.log('Error: path contains / but does not exist: %s'%self.path) else: if self.ptype is not None: if os.path.exists(self.ptype): cmd.append(self.ptype) elif '/' not in self.ptype: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') ppath = os.path.join(path, self.ptype) if os.path.isfile(ppath): cmd.append(ppath) break cmd.append(self.path) if sys.version_info < (3, 0): cmd.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in [quote(str(x)) for x in self.args]+self.unquoted_args]) else: cmd.extend([str(x) for x in [quote(str(x)) for x in self.args]+self.unquoted_args]) else: debug.log('Error: Program path not set!') return ' '.join(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append_args(self, arg): """ This function appends the provided arguments to the program object. """
debug.log("Adding Arguments: %s"%(arg)) if isinstance(arg, (int,float)): self.args.append(str(arg)) if isinstance(arg, str): self.args.append(arg) if isinstance(arg, list): if sys.version_info < (3, 0): self.args.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in arg]) else: self.args.extend([str(x) for x in arg])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_stdout(self): """ This function will read the standard out of the program and print it """
# First we check if the file we want to print exists
if self.wdir != '':
    stdout = "%s/%s"%(self.wdir, self.stdout)
else:
    stdout = self.stdout
if os.path.exists(stdout):
    with open_(stdout, 'r') as f:
        # each line already ends with a newline, so plain concatenation
        # avoids the doubled blank lines that '\n'.join() would produce
        debug.print_out("".join(line for line in f))
else:
    # FILE DOESN'T EXIST
    debug.log("Error: The stdout file %s does not exist!"%(stdout))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_out_var(self, varnames=[]):
    """ This function will read the standard out of the program, catch
    variables and return the values, e.g. #varname=value
    """
if self.wdir != '':
    stdout = "%s/%s"%(self.wdir, self.stdout)
else:
    stdout = self.stdout
response = [None]*len(varnames)
# First we check if the file we want to read exists
if os.path.exists(stdout):
    with open_(stdout, 'r') as f:
        for line in f:
            if '=' in line:
                var = line.strip('#').split('=')
                value = var[1].strip()
                var = var[0].strip()
                if var in varnames:
                    response[varnames.index(var)] = value
else:
    # FILE DOESN'T EXIST
    debug.log("Error: The stdout file %s does not exist!"%(stdout))
return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reporter(self): """ Runs the necessary methods to parse raw read outputs """
logging.info('Preparing reports') # Populate self.plusdict in order to reuse parsing code from an assembly-based method for sample in self.runmetadata.samples: self.plusdict[sample.name] = dict() self.matchdict[sample.name] = dict() if sample.general.bestassemblyfile != 'NA': for gene in sample[self.analysistype].allelenames: self.plusdict[sample.name][gene] = dict() for allele, percentidentity in sample[self.analysistype].results.items(): if gene in allele: # Split the allele number from the gene name using the appropriate delimiter if '_' in allele: splitter = '_' elif '-' in allele: splitter = '-' else: splitter = '' self.matchdict[sample.name].update({gene: allele.split(splitter)[-1]}) # Create the plusdict dictionary as in the assembly-based (r)MLST method. Allows all the # parsing and sequence typing code to be reused. try: self.plusdict[sample.name][gene][allele.split(splitter)[-1]][percentidentity] \ = sample[self.analysistype].avgdepth[allele] except KeyError: self.plusdict[sample.name][gene][allele.split(splitter)[-1]] = dict() self.plusdict[sample.name][gene][allele.split(splitter)[-1]][percentidentity] \ = sample[self.analysistype].avgdepth[allele] if gene not in self.matchdict[sample.name]: self.matchdict[sample.name].update({gene: 'N'}) self.profiler() self.sequencetyper() self.mlstreporter()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report_parse(self):
    """
    If the pipeline has previously been run on these data, parse the
    existing report instead of re-reading the results
    """
# Initialise lists report_strains = list() genus_list = list() if self.analysistype == 'mlst': for sample in self.runmetadata.samples: try: genus_list.append(sample.general.referencegenus) except AttributeError: sample.general.referencegenus = 'ND' genus_list.append(sample.general.referencegenus) # Read in the report if self.analysistype == 'mlst': for genus in genus_list: try: report_name = os.path.join(self.reportpath, '{at}_{genus}.csv'.format(at=self.analysistype, genus=genus)) report_strains = self.report_read(report_strains=report_strains, report_name=report_name) except FileNotFoundError: report_name = self.report report_strains = self.report_read(report_strains=report_strains, report_name=report_name) else: report_name = self.report report_strains = self.report_read(report_strains=report_strains, report_name=report_name) # Populate strains not in the report with 'empty' GenObject with appropriate attributes for sample in self.runmetadata.samples: if sample.name not in report_strains: setattr(sample, self.analysistype, GenObject()) sample[self.analysistype].sequencetype = 'ND' sample[self.analysistype].matches = 0 sample[self.analysistype].results = dict()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_type(filename, **kwargs):
    """ Utility function to call classes based on filename extension.
    Useful if you are reading the file and don't know the file extension.
    You can pass kwargs; these args are passed to the class only if they
    are used in the class.
    """
extension = os.path.splitext(filename)[1]
case = {'.xls': Xls, '.xlsx': Xlsx, '.csv': Csv}
if extension and case.get(extension.lower()):
    low_extension = extension.lower()
    new_kwargs = dict()
    class_name = case.get(low_extension)
    # getargspec was removed in Python 3.11; getfullargspec is the replacement
    class_kwargs = inspect.getfullargspec(class_name.__init__).args[1:]
    for kwarg in kwargs:
        if kwarg in class_kwargs:
            new_kwargs[kwarg] = kwargs[kwarg]
    return case.get(low_extension)(filename, **new_kwargs)
else:
    raise Exception('No extension found')
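A hedged usage sketch; the keyword below is hypothetical and is silently dropped unless the matching reader class declares it in its __init__:

reader = guess_type("data.csv", delimiter=";")  # 'delimiter' is illustrative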
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_gene_seqs(database_path, gene): """ This function takes the database path and a gene name as inputs and returns the gene sequence contained in the file given by the gene name """
gene_path = database_path + "/" + gene + ".fsa" gene_seq = "" # Open fasta file with open(gene_path) as gene_file: header = gene_file.readline() for line in gene_file: seq = line.strip() gene_seq += seq return gene_seq
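A small sketch under stated assumptions: the database directory and gene name are hypothetical, and the function expects a single-record FASTA file named <gene>.fsa:

seq = get_gene_seqs("databases/resistance", "blaTEM")  # reads databases/resistance/blaTEM.fsa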
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_best_sequence(hits_found, specie_path, gene, silent_N_flag):
    """ This function takes the list hits_found as argument. This contains
    all hits found for the blast search of one gene. A hit includes the
    subject sequence, the query, and the start and stop position of the
    alignment corresponding to the subject sequence. This function finds
    the best hit by concatenating sequences of found hits. If different
    overlap sequences occur, these are saved in the list
    alternative_overlaps. The subject and query sequence of the
    concatenated sequence together with alternative overlaps and the
    corresponding start and stop positions are returned.
    """
# Get information from the first hit found
all_start = hits_found[0][0]
current_end = hits_found[0][1]
final_sbjct = hits_found[0][2]
final_qry = hits_found[0][3]
sbjct_len = hits_found[0][4]

alternative_overlaps = []

# Check if more than one hit was found within the same gene
for i in range(len(hits_found)-1):

    # Save information from previous hit
    pre_block_start = hits_found[i][0]
    pre_block_end = hits_found[i][1]
    pre_sbjct = hits_found[i][2]
    pre_qry = hits_found[i][3]

    # Save information from next hit
    next_block_start = hits_found[i+1][0]
    next_block_end = hits_found[i+1][1]
    next_sbjct = hits_found[i+1][2]
    next_qry = hits_found[i+1][3]

    # Check for overlapping sequences, collapse them and save alternative overlaps if any
    if next_block_start <= current_end:

        # Find overlap start and take gaps into account
        pos_count = 0
        overlap_pos = pre_block_start
        for i in range(len(pre_sbjct)):

            # Stop loop if overlap_start position is reached
            if overlap_pos == next_block_start:
                overlap_start = pos_count
                break
            if pre_sbjct[i] != "-":
                overlap_pos += 1
            pos_count += 1

        # Find overlap length and add next sequence to final sequence
        if len(pre_sbjct[overlap_start:]) > len(next_sbjct):
            #  <--------->
            #     <--->
            overlap_len = len(next_sbjct)
            overlap_end_pos = next_block_end
        else:
            #  <--------->
            #        <--------->
            overlap_len = len(pre_sbjct[overlap_start:])
            overlap_end_pos = pre_block_end

            # Update current end
            current_end = next_block_end

            # Use the entire pre sequence and add the last part of the next sequence
            final_sbjct += next_sbjct[overlap_len:]
            final_qry += next_qry[overlap_len:]

        # Find query overlap sequences
        pre_qry_overlap = pre_qry[overlap_start : (overlap_start + overlap_len)] # can work for both types of overlap
        next_qry_overlap = next_qry[:overlap_len]
        sbjct_overlap = next_sbjct[:overlap_len]

        # If an alternative query overlap exists, save it
        if pre_qry_overlap != next_qry_overlap:
            print("OVERLAP WARNING:")
            print(pre_qry_overlap, "\n", next_qry_overlap)

            # Save alternative overlaps
            alternative_overlaps += [(next_block_start, overlap_end_pos, sbjct_overlap, next_qry_overlap)]

    elif next_block_start > current_end:
        #  <------->
        #              <------->
        gap_size = next_block_start - current_end - 1
        final_qry += "N"*gap_size
        if silent_N_flag:
            final_sbjct += "N"*gap_size
        else:
            ref_seq = get_gene_seqs(specie_path, gene)
            final_sbjct += ref_seq[pre_block_end:pre_block_end+gap_size]
        current_end = next_block_end
        final_sbjct += next_sbjct
        final_qry += next_qry

# Calculate coverage
no_call = final_qry.upper().count("N")
coverage = (current_end - all_start +1 - no_call) / float(sbjct_len)

# Calculate identity
equal = 0
not_equal = 0
for i in range(len(final_qry)):
    if final_qry[i].upper() != "N":
        if final_qry[i].upper() == final_sbjct[i].upper():
            equal += 1
        else:
            not_equal += 1
identity = equal/float(equal + not_equal)

return final_sbjct, final_qry, all_start, current_end, alternative_overlaps, coverage, identity
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_mismatches(gene, sbjct_start, sbjct_seq, qry_seq, alternative_overlaps = []):
    """ This function finds mismatches between two sequences. Depending on
    the sequence type, either find_codon_mismatches or
    find_nucleotid_mismatches is called; if the sequence contains both a
    promoter and a coding region, both functions are called. The function
    can also call itself if alternative overlaps are given. All found
    mismatches are returned.
    """
# Initiate the mis_matches list that will store all found mismatches
mis_matches = []

# Find mismatches in RNA genes
if gene in RNA_gene_list:
    mis_matches += find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq)
else:
    # Check if the gene sequence includes a promoter
    regex = r"promoter_size_(\d+)(?:bp)"
    promtr_gene_objt = re.search(regex, gene)

    # Check for promoter sequences
    if promtr_gene_objt:
        # Get promoter length
        promtr_len = int(promtr_gene_objt.group(1))

        # Extract promoter sequence, while considering gaps
        # --------agt-->----
        # ---->?
        if sbjct_start <= promtr_len:
            # Find position in sbjct sequence where promoter ends
            promtr_end = 0
            nuc_count = sbjct_start - 1
            for i in range(len(sbjct_seq)):
                promtr_end += 1
                if sbjct_seq[i] != "-":
                    nuc_count += 1
                if nuc_count == promtr_len:
                    break

            # Check if only a part of the promoter is found
            # --------agt-->----
            # ----
            promtr_sbjct_start = -1
            if nuc_count < promtr_len:
                promtr_sbjct_start = nuc_count - promtr_len

            # Get promoter part of subject and query
            sbjct_promtr_seq = sbjct_seq[:promtr_end]
            qry_promtr_seq = qry_seq[:promtr_end]

            # For promoter part find nucleotide mismatches
            mis_matches += find_nucleotid_mismatches(promtr_sbjct_start, sbjct_promtr_seq, qry_promtr_seq, promoter = True)

            # Check if gene is also found
            # --------agt-->----
            #     -----------
            if (sbjct_start + len(sbjct_seq.replace("-", ""))) > promtr_len:
                sbjct_gene_seq = sbjct_seq[promtr_end:]
                qry_gene_seq = qry_seq[promtr_end:]
                sbjct_gene_start = 1

                # Find mismatches in gene part
                mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_gene_seq, qry_gene_seq)

        # No promoter, only gene is found
        # --------agt-->----
        #            -----
        else:
            sbjct_gene_start = sbjct_start - promtr_len

            # Find mismatches in gene part
            mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_seq, qry_seq)
    else:
        # Find mismatches in gene
        mis_matches += find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq)

# Find mismatches in alternative overlaps if any
for overlap in alternative_overlaps:
    mis_matches += find_mismatches(gene, overlap[0], overlap[2], overlap[3])

return mis_matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_nuc_indel(gapped_seq, indel_seq):
    """ This function finds the entire indel missing from a gapped
    sequence compared to the indel_sequence. It assumes that the
    sequences start at the first position of the gap.
    """
ref_indel = indel_seq[0] for j in range(1,len(gapped_seq)): if gapped_seq[j] == "-": ref_indel += indel_seq[j] else: break return ref_indel
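A worked example: the gap spans the first two positions, so the first two nucleotides of the indel sequence are recovered:

assert find_nuc_indel("--ACGT", "TTACGT") == "TT"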
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def aa(codon):
    """ This function converts a codon to an amino acid. If the codon is
    not valid, '?' is returned; otherwise the corresponding amino acid
    is returned.
    """
codon = codon.upper() aa = {"ATT": "I", "ATC": "I", "ATA": "I", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L", "TTA": "L", "TTG": "L", "GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V", "TTT": "F", "TTC": "F", "ATG": "M", "TGT": "C", "TGC": "C", "GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A", "GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G", "CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P", "ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T", "TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S", "TAT": "Y", "TAC": "Y", "TGG": "W", "CAA": "Q", "CAG": "Q", "AAT": "N", "AAC": "N", "CAT": "H", "CAC": "H", "GAA": "E", "GAG": "E", "GAT": "D", "GAC": "D", "AAA": "K", "AAG": "K", "CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R", "TAA": "*", "TAG": "*", "TGA": "*"} # Translate valid codon try: amino_a = aa[codon] except KeyError: amino_a = "?" return amino_a
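Two quick checks of the translation table; lookups are case-insensitive and unknown codons map to '?':

assert aa("atg") == "M"  # methionine / start codon
assert aa("NNN") == "?"  # invalid codon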
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_codon(seq, codon_no, start_offset):
    """ This function takes a sequence and a codon number and returns the
    codon found in the sequence at that position
    """
seq = seq.replace("-","") codon_start_pos = int(codon_no - 1)*3 - start_offset codon = seq[codon_start_pos:codon_start_pos + 3] return codon
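A worked example; with no offset, codon 2 starts at index 3, and gaps are stripped before slicing:

assert get_codon("ATGAAACCC", 2, 0) == "AAA"
assert get_codon("AT-GAAACCC", 2, 0) == "AAA"  # the gap is removed first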
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset):
    """ This function is used to name an insertion mutation based on the
    HGVS recommendation.
    """
start_codon_no = codon_no - 1 if len(sbjct_nucs) == 3: start_codon_no = codon_no start_codon = get_codon(sbjct_seq, start_codon_no, start_offset) end_codon = get_codon(sbjct_seq, codon_no, start_offset) pos_name = "p.%s%d_%s%dins%s"%(aa(start_codon), start_codon_no, aa(end_codon), codon_no, aa_alt) return pos_name
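A worked example of the naming scheme, using an illustrative one-nucleotide insertion between codons 1 (ATG, M) and 2 (AAA, K); the sequence and residues are hypothetical:

assert name_insertion("ATGAAACCC", 2, "A", "K", 0) == "p.M1_K2insK"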
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, start_offset):
    """ This function names the individual mutations depending on the
    type of the mutation.
    """
# Get the subject and query sequences without gaps sbjct_nucs = sbjct_rf_indel.replace("-", "") qry_nucs = qry_rf_indel.replace("-", "") # Translate nucleotides to amino acids aa_ref = "" aa_alt = "" for i in range(0, len(sbjct_nucs), 3): aa_ref += aa(sbjct_nucs[i:i+3]) for i in range(0, len(qry_nucs), 3): aa_alt += aa(qry_nucs[i:i+3]) # Identify the gapped sequence if mut == "ins": gapped_seq = sbjct_rf_indel else: gapped_seq = qry_rf_indel gap_size = gapped_seq.count("-") # Write mutation names if gap_size < 3 and len(sbjct_nucs) ==3 and len(qry_nucs) == 3: # Write mutation name for substitution mutation mut_name = "p.%s%d%s"%(aa(sbjct_nucs), codon_no, aa(qry_nucs)) elif len(gapped_seq) == gap_size: if mut == "ins": # Write mutation name for insertion mutation mut_name = name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset) aa_ref = mut else: # Write mutation name for deletion mutation mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "del") aa_alt = mut # Check for delins - mix of insertion and deletion else: # Write mutation name for a mixed insertion and deletion mutation mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "delins") # Check for frameshift if gapped_seq.count("-")%3 != 0: # Add the frameshift tag to mutation name mut_name += " - Frameshift" return mut_name, aa_ref, aa_alt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_inframe_gap(seq, nucs_needed = 3):
    """ This function takes a sequence starting with a gap, or the
    sequence complementary to the gap, and the number of nucleotides
    that the sequence should contain in order to maintain the correct
    reading frame. The sequence is walked through and the number of
    non-gap characters is counted. When that number reaches the number
    of needed nucleotides, the indel is returned. If the indel is a
    'clean' insert or deletion that starts at the start of a codon and
    can be divided by 3, then only the gap is returned.
    """
nuc_count = 0 gap_indel = "" nucs = "" for i in range(len(seq)): # Check if the character is not a gap if seq[i] != "-": # Check if the indel is a 'clean' # i.e. if the insert or deletion starts at the first nucleotide in the codon and can be divided by 3 if gap_indel.count("-") == len(gap_indel) and gap_indel.count("-") >= 3 and len(gap_indel) != 0: return gap_indel nuc_count += 1 gap_indel += seq[i] # If the number of nucleotides in the indel equals the amount needed for the indel, the indel is returned. if nuc_count == nucs_needed: return gap_indel # This will only happen if the gap is in the very end of a sequence return gap_indel
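Two worked examples: a clean in-frame deletion returns only the gap, while a gap that breaks a codon returns the flanking nucleotides as well:

assert get_inframe_gap("---ACG", 3) == "---"
assert get_inframe_gap("A--CG", 3) == "A--CG"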
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self):
    """Try merging all the bravado_core models across all loaded APIs.
    If duplicates occur, use the same bravado-core model to represent
    each, so bravado-core won't treat them as different models when
    passing them from one PyMacaron client stub to another or when
    returning them via the PyMacaron server stub.
    """
# The sole purpose of this method is to trick isinstance to return true # on model_values of the same kind but different apis/specs at: # https://github.com/Yelp/bravado-core/blob/4840a6e374611bb917226157b5948ee263913abc/bravado_core/marshal.py#L160 log.info("Merging models of apis " + ", ".join(apis.keys())) # model_name => (api_name, model_json_def, bravado_core.model.MODELNAME) models = {} # First pass: find duplicate and keep only one model of each (fail if # duplicates have same name but different definitions) for api_name, api in apis.items(): for model_name, model_def in api.api_spec.swagger_dict['definitions'].items(): if model_name in models: other_api_name, other_model_def, _ = models.get(model_name) log.debug("Model %s in %s is a duplicate of one in %s" % (model_name, api_name, other_api_name)) if ApiPool._cmp_models(model_def, other_model_def) != 0: raise MergeApisException("Cannot merge apis! Model %s exists in apis %s and %s but have different definitions:\n[%s]\n[%s]" % (model_name, api_name, other_api_name, pprint.pformat(model_def), pprint.pformat(other_model_def))) else: models[model_name] = (api_name, model_def, api.api_spec.definitions[model_name]) # Second pass: patch every models and replace with the one we decided # to keep log.debug("Patching api definitions to remove all duplicates") for api_name, api in apis.items(): for model_name in api.api_spec.definitions.keys(): _, _, model_class = models.get(model_name) api.api_spec.definitions[model_name] = model_class
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(
    self,
    name,
    command_to_run,
    description="",
    environment_variables=None,
    required_arguments=None,
    required_arguments_default_values=None,
    extra_data_to_post=None,
):
    """Create a task type.

    Args:
        name (str): The name of the task.
        command_to_run (str): The command to run to execute the task.
        description (str, optional): The description of the task type.
        environment_variables (list, optional): The environment
            variables required on the host to execute the task.
        required_arguments (list, optional): The argument names for
            the task type.
        required_arguments_default_values (dict, optional): Default
            values for the task's required arguments.
        extra_data_to_post (dict, optional): Extra key-value pairs to
            add to the request data. This is useful for subclasses
            which require extra parameters.

    Returns:
        :class:`saltant.models.base_task_type.BaseTaskType`: A task
            type model instance representing the task type just
            created.
    """
# Set None for optional list and dicts to proper datatypes if environment_variables is None: environment_variables = [] if required_arguments is None: required_arguments = [] if required_arguments_default_values is None: required_arguments_default_values = {} # Create the object request_url = self._client.base_api_url + self.list_url data_to_post = { "name": name, "description": description, "command_to_run": command_to_run, "environment_variables": json.dumps(environment_variables), "required_arguments": json.dumps(required_arguments), "required_arguments_default_values": json.dumps( required_arguments_default_values ), } # Add in extra data if any was passed in if extra_data_to_post is not None: data_to_post.update(extra_data_to_post) response = self._client.session.post(request_url, data=data_to_post) # Validate that the request was successful self.validate_request_success( response_text=response.text, request_url=request_url, status_code=response.status_code, expected_status_code=HTTP_201_CREATED, ) # Return a model instance representing the task type return self.response_data_to_model_instance(response.json())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def response_data_to_model_instance(self, response_data):
    """Convert response data to a task type model.

    Args:
        response_data (dict): The data from the request's response.

    Returns:
        :class:`saltant.models.base_task_type.BaseTaskType`: A model
            instance representing the task type from the response
            data.
    """
# Coerce datetime strings into datetime objects response_data["datetime_created"] = dateutil.parser.parse( response_data["datetime_created"] ) # Instantiate a model for the task instance return super( BaseTaskTypeManager, self ).response_data_to_model_instance(response_data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deploy(version): """ Deploy to pypi as specified version. """
NAME = "pathquery" git = Command("git").in_dir(DIR.project) version_file = DIR.project.joinpath("VERSION") old_version = version_file.bytes().decode('utf8') if version_file.bytes().decode("utf8") != version: DIR.project.joinpath("VERSION").write_text(version) git("add", "VERSION").run() git("commit", "-m", "RELEASE: Version {0} -> {1}".format( old_version, version )).run() git("push").run() git("tag", "-a", version, "-m", "Version {0}".format(version)).run() git("push", "origin", version).run() else: git("push").run() # Set __version__ variable in __init__.py, build sdist and put it back initpy = DIR.project.joinpath(NAME, "__init__.py") original_initpy_contents = initpy.bytes().decode('utf8') initpy.write_text( original_initpy_contents.replace("DEVELOPMENT_VERSION", version) ) python("setup.py", "sdist").in_dir(DIR.project).run() initpy.write_text(original_initpy_contents) # Upload to pypi python( "-m", "twine", "upload", "dist/{0}-{1}.tar.gz".format(NAME, version) ).in_dir(DIR.project).run()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hvenvup(package, directory): """ Install a new version of a package in the hitch venv. """
pip = Command(DIR.gen.joinpath("hvenv", "bin", "pip")) pip("uninstall", package, "-y").run() pip("install", DIR.project.joinpath(directory).abspath()).run()