def receive_offer(self, pkt):
    """Receive offer on SELECTING state."""
    logger.debug("C2. Received OFFER?, in SELECTING state.")
    if isoffer(pkt):
        logger.debug("C2: T, OFFER received")
        self.offers.append(pkt)
        if len(self.offers) >= MAX_OFFERS_COLLECTED:
            logger.debug("C2.5: T, raise REQUESTING.")
            self.select_offer()
            raise self.REQUESTING()
    logger.debug("C2.5: F, raise SELECTING.")
    raise self.SELECTING()
def get_changes(self, factory_name, global_factory=False, resources=None,
                task_handle=taskhandle.NullTaskHandle()):
    """Get the changes this refactoring makes

    `factory_name` indicates the name of the factory function to be
    added.  If `global_factory` is `True` the factory will be global
    otherwise a static method is added to the class.

    `resources` can be a list of `rope.base.resource.File`\s that this
    refactoring should be applied on; if `None` all python files in the
    project are searched.

    """
    if resources is None:
        resources = self.project.get_python_files()
    changes = ChangeSet('Introduce factory method <%s>' % factory_name)
    job_set = task_handle.create_jobset('Collecting Changes',
                                        len(resources))
    self._change_module(resources, changes, factory_name,
                        global_factory, job_set)
    return changes
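A minimal usage sketch for the refactoring above, assuming a rope project on disk; the project path, module path, and offset below are hypothetical:

# Hypothetical driver for IntroduceFactory.get_changes (paths/offset invented).
from rope.base.project import Project
from rope.refactor.introduce_factory import IntroduceFactory

project = Project('/path/to/project')            # hypothetical project root
resource = project.get_resource('pkg/mod.py')    # file holding the class
refactoring = IntroduceFactory(project, resource, offset=42)
changes = refactoring.get_changes('create', global_factory=False)
project.do(changes)   # apply the collected ChangeSet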
def list_checks(ruleset, ruleset_file, debug, json, skip, tag, verbose, checks_paths):
    """
    Print the checks.
    """
    if ruleset and ruleset_file:
        raise click.BadOptionUsage(
            "Options '--ruleset' and '--file-ruleset' cannot be used together.")
    try:
        if not debug:
            logging.basicConfig(stream=six.StringIO())
        log_level = _get_log_level(debug=debug, verbose=verbose)
        checks = get_checks(ruleset_name=ruleset,
                            ruleset_file=ruleset_file,
                            logging_level=log_level,
                            tags=tag,
                            checks_paths=checks_paths,
                            skips=skip)
        _print_checks(checks=checks)
        if json:
            AbstractCheck.save_checks_to_json(file=json, checks=checks)
    except ColinException as ex:
        logger.error("An error occurred: %r", ex)
        if debug:
            raise
        else:
            raise click.ClickException(str(ex))
    except Exception as ex:
        logger.error("An error occurred: %r", ex)
        if debug:
            raise
        else:
            raise click.ClickException(str(ex))
def _build_conflict_target(self):
    """Builds the `conflict_target` for the ON CONFLICT clause."""
    conflict_target = []

    if not isinstance(self.query.conflict_target, list):
        raise SuspiciousOperation((
            '%s is not a valid conflict target, specify '
            'a list of column names, or tuples with column '
            'names and hstore key.'
        ) % str(self.query.conflict_target))

    def _assert_valid_field(field_name):
        field_name = self._normalize_field_name(field_name)
        if self._get_model_field(field_name):
            return

        raise SuspiciousOperation((
            '%s is not a valid conflict target, specify '
            'a list of column names, or tuples with column '
            'names and hstore key.'
        ) % str(field_name))

    for field_name in self.query.conflict_target:
        _assert_valid_field(field_name)

        # special handling for hstore keys
        if isinstance(field_name, tuple):
            conflict_target.append(
                '(%s->\'%s\')' % (
                    self._format_field_name(field_name),
                    field_name[1]
                )
            )
        else:
            conflict_target.append(
                self._format_field_name(field_name))

    return '(%s)' % ','.join(conflict_target)
def utc2local(date):
    """DokuWiki returns dates with a +0000 timezone. This function converts
    *date* to the local time.
    """
    date_offset = (datetime.now() - datetime.utcnow())
    # Python < 2.7 doesn't have the 'total_seconds' method so calculate it by hand!
    date_offset = (date_offset.microseconds +
                   (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
    date_offset = int(round(date_offset / 60 / 60))
    return date + timedelta(hours=date_offset)
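A quick sketch of how the helper might be used on a timestamp returned by DokuWiki; the date below is invented:

# Shift a +0000 DokuWiki timestamp into local time.
from datetime import datetime

utc_modified = datetime(2019, 1, 1, 12, 0, 0)   # hypothetical +0000 value
local_modified = utc2local(utc_modified)        # e.g. 13:00 on a UTC+1 machine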
def render_surface_function(surfimg, funcimg=None, alphasurf=0.2, alphafunc=1.0,
                            isosurf=0.5, isofunc=0.5, smoothsurf=None,
                            smoothfunc=None, cmapsurf='grey', cmapfunc='red',
                            filename=None, notebook=False, auto_open=False):
    """
    Render an image as a base surface and an optional collection of other
    images.

    ANTsR function: `renderSurfaceFunction`

    NOTE: The ANTsPy version of this function is actually completely different
    than the ANTsR version, although they should produce similar results.

    Arguments
    ---------
    surfimg : ANTsImage
        Input image to use as rendering substrate.
    funcimg : ANTsImage
        Input list of images to use as functional overlays.
    alphasurf : scalar
        alpha for the surface contour
    alphafunc : scalar
        alpha value for functional blobs
    isosurf : scalar
        intensity level that defines lower threshold for surface image
    isofunc : scalar
        intensity level that defines lower threshold for functional image
    smoothsurf : scalar (optional)
        smoothing for the surface image
    smoothfunc : scalar (optional)
        smoothing for the functional image
    cmapsurf : string
        color map for surface image
    cmapfunc : string
        color map for functional image
    filename : string
        where to save rendering. if None, will plot interactively
    notebook : boolean
        whether you're in a jupyter notebook.

    Returns
    -------
    N/A

    Example
    -------
    >>> import ants
    >>> mni = ants.image_read(ants.get_ants_data('mni'))
    >>> mnia = ants.image_read(ants.get_ants_data('mnia'))
    >>> ants.render_surface_function(mni, mnia, alphasurf=0.1, filename='/users/ncullen/desktop/surffnc.png')
    """
    cmap_dict = {
        'grey': 'Greys',
        'gray': 'Greys',
        'red': 'Reds',
        'green': 'Greens',
        'jet': 'Jet'
    }

    if surfimg.dimension != 3:
        raise ValueError('surfimg must be 3D')

    #if (filename is None) and (not notebook_render):
    #    raise Exception('Must either 1) give filename, 2) set `html_render`=True or 3) set `notebook_render`=True')

    if notebook:
        init_notebook_mode(connected=True)

    fig_list = []
    fig_data_list = []

    surfimg = resample_image(surfimg, (3,3,3))
    surfimg_arr = surfimg.numpy()
    surfverts, surffaces, _, _ = skimage.measure.marching_cubes_lewiner(
        surfimg_arr, isosurf, spacing=(1,1,1))
    surffig = FF.create_trisurf(x=surfverts[:,0], y=surfverts[:,1],
                                z=surfverts[:,2],
                                colormap=cmap_dict.get(cmapsurf, cmapsurf),
                                plot_edges=False, simplices=surffaces)
    surffig['data'][0].update(opacity=alphasurf)
    fig_list.append(surffig)
    fig_data_list.append(surffig.data[0])

    if funcimg is not None:
        if not isinstance(funcimg, (tuple, list)):
            funcimg = [funcimg]
        if not isinstance(alphafunc, (tuple, list)):
            alphafunc = [alphafunc]*len(funcimg)
        if not isinstance(isofunc, (tuple, list)):
            isofunc = [isofunc]*len(funcimg)
        if not isinstance(cmapfunc, (tuple, list)):
            cmapfunc = [cmapfunc]*len(funcimg)
        #cmapfunc = [cmap_dict.get(c,c) for c in cmapfunc]
        for i in range(len(cmapfunc)):
            cmapfunc[i] = 'rgb%s' % str(wc.name_to_rgb(cmapfunc[i]))
            cmapfunc[i] = [cmapfunc[i]]*2

        for func_idx, fimg in enumerate(funcimg):
            if fimg.dimension != 3:
                raise ValueError('all funcimgs must be 3D')
            fimg = resample_image(fimg, (3,3,3))
            funcimg_arr = fimg.numpy()
            funcverts, funcfaces, _, _ = skimage.measure.marching_cubes_lewiner(
                funcimg_arr, isofunc[func_idx], spacing=(1,1,1))
            funcfig = FF.create_trisurf(x=funcverts[:,0], y=funcverts[:,1],
                                        z=funcverts[:,2], plot_edges=False,
                                        simplices=funcfaces,
                                        colormap=cmapfunc[func_idx])
            funcfig['data'][0].update(opacity=alphafunc[func_idx])
            fig_list.append(funcfig)
            fig_data_list.append(funcfig.data[0])

    if filename is not None:
        save_file = 'png'
        image_filename = filename
        filename = image_filename.split('.')[0] + '.html'
    else:
        image_filename = 'ants_plot'
        filename = 'ants_plot.html'
        save_file = None

    try:
        plot(fig_data_list, image=save_file, filename=filename,
             image_filename=image_filename, auto_open=auto_open)
    except PermissionError:
        print('PermissionError caught - are you running jupyter console? '
              'Try launching it with sudo privileges (e.g. `sudo jupyter-console`)')
def read_file(file_name, encoding='utf-8'):
    """
    Read a text file.

    :param encoding:
    :param file_name:
    :return:
    """
    with open(file_name, 'rb') as f:
        data = f.read()
        if encoding is not None:
            data = data.decode(encoding)
        return data
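Usage sketch (file names are hypothetical); passing encoding=None returns the undecoded bytes:

text = read_file('notes.txt')                 # decoded as UTF-8
raw = read_file('image.png', encoding=None)   # raw bytes, left undecoded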
def _filter_result(result, filter_functions=None):
    """
    Filter result with given filter functions.

    :param result: an iterable object
    :param filter_functions: some filter functions
    :return: a filter object (filtered result)
    """
    if filter_functions is not None:
        for filter_func in filter_functions:
            result = filter(filter_func, result)
    return result
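A small self-contained example of how the filters compose; each predicate is applied lazily in turn:

evens_over_ten = _filter_result(range(20),
                                [lambda x: x % 2 == 0, lambda x: x > 10])
print(list(evens_over_ten))   # [12, 14, 16, 18]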
def exception(self, timeout=None):
    """Return the exception raised by the call that the future represents.

    Args:
        timeout: The number of seconds to wait for the exception if the
            future isn't done. If None, then there is no limit on the wait
            time.

    Returns:
        The exception raised by the call that the future represents or None
        if the call completed without raising.

    Raises:
        TimeoutError: If the future didn't finish executing before the given
            timeout.
    """
    if self._state == self.RUNNING:
        self._context.wait_all_futures([self], timeout)
    return self._exception
def preconstrain_flag_page(self, magic_content):
    """
    Preconstrain the data in the flag page.

    :param magic_content: The content of the magic page as a bytestring.
    """
    for m, v in zip(magic_content, self.state.cgc.flag_bytes):
        self.preconstrain(m, v)
def skycoord_to_healpix(self, skycoord, return_offsets=False):
    """
    Convert celestial coordinates to HEALPix indices (optionally with offsets).

    Note that this method requires that a celestial frame was specified
    when initializing HEALPix. If you don't know or need the celestial frame,
    you can instead use :meth:`~astropy_healpix.HEALPix.lonlat_to_healpix`.

    Parameters
    ----------
    skycoord : :class:`~astropy.coordinates.SkyCoord`
        The celestial coordinates to convert
    return_offsets : bool
        If `True`, the returned values are the HEALPix pixel as well as
        ``dx`` and ``dy``, the fractional positions inside the pixel. If
        `False` (the default), only the HEALPix pixel is returned.

    Returns
    -------
    healpix_index : `~numpy.ndarray`
        1-D array of HEALPix indices
    dx, dy : `~numpy.ndarray`
        1-D arrays of offsets inside the HEALPix pixel in the range [0:1]
        (0.5 is the center of the HEALPix pixels). This is returned if
        ``return_offsets`` is `True`.
    """
    if self.frame is None:
        raise NoFrameError("skycoord_to_healpix")
    skycoord = skycoord.transform_to(self.frame)
    representation = skycoord.represent_as(UnitSphericalRepresentation)
    lon, lat = representation.lon, representation.lat
    return self.lonlat_to_healpix(lon, lat, return_offsets=return_offsets)
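A usage sketch against the public astropy_healpix API; the nside and test coordinate are invented:

from astropy.coordinates import SkyCoord, Galactic
from astropy_healpix import HEALPix

hp = HEALPix(nside=16, order='nested', frame=Galactic())
coord = SkyCoord(l=10.0, b=20.0, unit='deg', frame='galactic')
index = hp.skycoord_to_healpix(coord)
index, dx, dy = hp.skycoord_to_healpix(coord, return_offsets=True)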
def get_parameter(self, path, default=None, return_group=False):
    """
    Reads hyperparameter from job configuration. If nothing found use given default.

    :param path: str
    :param default: *
    :param return_group: If true and path is a choice_group, we return the
                         dict instead of the group name.
    :return: *
    """
    value = read_parameter_by_path(self.job['config']['parameters'], path, return_group)

    if value is None:
        return default

    return value
def add(name, beacon_data, **kwargs):
    '''
    Add a beacon on the minion

    Args:
        name (str): Name of the beacon to configure
        beacon_data (dict): Dictionary or list containing configuration for beacon.

    Returns:
        dict: Boolean and status message on success or failure of add.

    CLI Example:

    .. code-block:: bash

        salt '*' beacons.add ps "[{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]"
    '''
    ret = {'comment': 'Failed to add beacon {0}.'.format(name),
           'result': False}

    if name in list_(return_yaml=False, **kwargs):
        ret['comment'] = 'Beacon {0} is already configured.'.format(name)
        return ret

    # Check to see if a beacon_module is specified, if so, verify it is
    # valid and available beacon type.
    if any('beacon_module' in key for key in beacon_data):
        res = next(value for value in beacon_data if 'beacon_module' in value)
        beacon_name = res['beacon_module']
    else:
        beacon_name = name

    if beacon_name not in list_available(return_yaml=False, **kwargs):
        ret['comment'] = 'Beacon "{0}" is not available.'.format(beacon_name)
        return ret

    if 'test' in kwargs and kwargs['test']:
        ret['result'] = True
        ret['comment'] = 'Beacon: {0} would be added.'.format(name)
    else:
        try:
            # Attempt to load the beacon module so we have access to the
            # validate function
            eventer = salt.utils.event.get_event('minion', opts=__opts__)
            res = __salt__['event.fire']({'name': name,
                                          'beacon_data': beacon_data,
                                          'func': 'validate_beacon'},
                                         'manage_beacons')
            if res:
                event_ret = eventer.get_event(
                    tag='/salt/minion/minion_beacon_validation_complete',
                    wait=kwargs.get('timeout', 30))
                valid = event_ret['valid']
                vcomment = event_ret['vcomment']

                if not valid:
                    ret['result'] = False
                    ret['comment'] = ('Beacon {0} configuration invalid, '
                                      'not adding.\n{1}'.format(name, vcomment))
                    return ret
        except KeyError:
            # Effectively a no-op, since we can't really return without an event
            # system
            ret['result'] = False
            ret['comment'] = 'Event module not available. Beacon add failed.'
            return ret

        try:
            res = __salt__['event.fire']({'name': name,
                                          'beacon_data': beacon_data,
                                          'func': 'add'},
                                         'manage_beacons')
            if res:
                event_ret = eventer.get_event(
                    tag='/salt/minion/minion_beacon_add_complete',
                    wait=kwargs.get('timeout', 30))
                if event_ret and event_ret['complete']:
                    beacons = event_ret['beacons']
                    if name in beacons and beacons[name] == beacon_data:
                        ret['result'] = True
                        ret['comment'] = 'Added beacon: {0}.'.format(name)
                elif event_ret:
                    ret['result'] = False
                    ret['comment'] = event_ret['comment']
                else:
                    ret['result'] = False
                    ret['comment'] = 'Did not receive the manage event ' \
                                     'before the timeout of {0}s' \
                                     ''.format(kwargs.get('timeout', 30))
                return ret
        except KeyError:
            # Effectively a no-op, since we can't really return without an event
            # system
            ret['result'] = False
            ret['comment'] = 'Event module not available. Beacon add failed.'
    return ret
def parse_schema(schema_file):
    """
    parses the schema file and returns the columns that are later going to
    represent the columns of the genometric space dataframe

    :param schema_file: the path to the schema file
    :return: the columns of the schema file
    """
    e = xml.etree.ElementTree.parse(schema_file)
    root = e.getroot()
    cols = []
    for elem in root.findall(".//{http://genomic.elet.polimi.it/entities}field"):  # XPATH
        cols.append(elem.text)
    return cols
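A hypothetical schema file the parser would accept; the root element name is arbitrary here since the XPath only cares about the namespaced <field> elements:

# example.schema (hypothetical):
#   <gmqlSchema xmlns="http://genomic.elet.polimi.it/entities">
#     <field type="STRING">chr</field>
#     <field type="LONG">start</field>
#     <field type="LONG">stop</field>
#   </gmqlSchema>
cols = parse_schema('example.schema')   # -> ['chr', 'start', 'stop']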
def read(self, lenient=False):
    """
    Read the PNG file and decode it. Returns (`width`, `height`, `pixels`,
    `metadata`).

    May use excessive memory.

    `pixels` are returned in boxed row flat pixel format.

    If the optional `lenient` argument evaluates to True, checksum failures
    will raise warnings rather than exceptions.
    """
    self.preamble(lenient=lenient)
    raw = self.idatdecomp(lenient)

    if self.interlace:
        raw = bytearray(itertools.chain(*raw))
        arraycode = 'BH'[self.bitdepth > 8]
        # Like :meth:`group` but producing an array.array object for
        # each row.
        pixels = map(lambda *row: array(arraycode, row),
                     *[iter(self.deinterlace(raw))]*self.width*self.planes)
    else:
        pixels = self.iterboxed(self.iterstraight(raw))
    meta = dict()
    for attr in 'greyscale alpha planes bitdepth interlace'.split():
        meta[attr] = getattr(self, attr)
    meta['size'] = (self.width, self.height)
    for attr in ('gamma', 'transparent', 'background', 'last_mod_time',
                 'icc_profile', 'resolution', 'text', 'rendering_intent',
                 'white_point', 'rgb_points'):
        a = getattr(self, attr, None)
        if a is not None:
            meta[attr] = a
    if self.plte:
        meta['palette'] = self.palette()
    return self.width, self.height, pixels, meta
def sub_menu(self):
    """Create daemon submenu
    """
    submenu = gtk.Menu()
    self.start = gtk.ImageMenuItem("Start")
    self.stop = gtk.ImageMenuItem("Stop")
    self.restart = gtk.ImageMenuItem("Restart")
    self.status = gtk.ImageMenuItem("Status")
    self.start.show()
    self.stop.show()
    self.restart.show()
    self.status.show()
    img_Start = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, gtk.ICON_SIZE_MENU)
    img_Start.show()
    self.start.set_image(img_Start)
    img_Stop = gtk.image_new_from_stock(gtk.STOCK_STOP, gtk.ICON_SIZE_MENU)
    img_Stop.show()
    self.stop.set_image(img_Stop)
    img_Restart = gtk.image_new_from_stock(gtk.STOCK_REFRESH, gtk.ICON_SIZE_MENU)
    img_Restart.show()
    self.restart.set_image(img_Restart)
    img_Status = gtk.image_new_from_stock(gtk.STOCK_DIALOG_QUESTION, gtk.ICON_SIZE_MENU)
    img_Status.show()
    self.status.set_image(img_Status)
    submenu.append(self.start)
    submenu.append(self.stop)
    submenu.append(self.restart)
    submenu.append(self.status)
    self.daemon = gtk.ImageMenuItem("Daemon")
    self.img_daemon = gtk.image_new_from_stock(self.daemon_STOCK, gtk.ICON_SIZE_MENU)
    self.img_daemon.show()
    self.daemon.set_submenu(submenu)
def putParamset(self, remote, address, paramset, value):
    """Set paramsets manually"""
    if self._server is not None:
        return self._server.putParamset(remote, address, paramset, value)
def on_before_trading(self, date_time):
    """Check at market open: if a position is held, increment its holding days by 1."""
    if self.cta_call['pos'] > 0:
        self.cta_call['days'] += 1
    if self.cta_put['pos'] > 0:
        self.cta_put['days'] += 1
    self.cta_call['done'] = False
    self.cta_put['done'] = False
def print_task_output(batch_client, job_id, task_ids, encoding=None):
    """Prints the stdout and stderr for each task specified.
    Originally in azure-batch-samples.Python.Batch.common.helpers

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param str job_id: The id of the job to monitor.
    :param task_ids: The collection of tasks to print the output for.
    :type task_ids: `list`
    :param str encoding: The encoding to use when downloading the file.
    """
    for task_id in task_ids:
        file_text = read_task_file_as_string(
            batch_client,
            job_id,
            task_id,
            _STANDARD_OUT_FILE_NAME,
            encoding)
        print("{} content for task {}: ".format(
            _STANDARD_OUT_FILE_NAME, task_id))
        print(file_text)

        file_text = read_task_file_as_string(
            batch_client,
            job_id,
            task_id,
            _STANDARD_ERROR_FILE_NAME,
            encoding)
        print("{} content for task {}: ".format(
            _STANDARD_ERROR_FILE_NAME, task_id))
        print(file_text)
def render_tag(self, context, kwargs, nodelist):
    '''render content with "active" urls logic'''
    # load configuration from passed options
    self.load_configuration(**kwargs)

    # get request from context
    request = context['request']

    # get full path from request
    self.full_path = request.get_full_path()

    # render content of template tag
    context.push()
    content = nodelist.render(context)
    context.pop()

    # check content for "active" urls
    content = render_content(
        content,
        full_path=self.full_path,
        parent_tag=self.parent_tag,
        css_class=self.css_class,
        menu=self.menu,
        ignore_params=self.ignore_params,
    )

    return content
def find_in_registry(category=None, namespace=None, name=None):
    """
    Find a given category/namespace/name combination in the registry

    category - string, see utils.inputs.registrycategories
    namespace - module namespace, see settings.NAMESPACE
    name - lowercase name of module
    """
    selected_registry = registry

    if category is not None:
        selected_registry = [re for re in selected_registry if re.category == category]
    if namespace is not None:
        selected_registry = [re for re in selected_registry if re.namespace == namespace]
    if name is not None:
        selected_registry = [re for re in selected_registry if re.name == name]

    if len(selected_registry) > 0:
        return [sr.cls for sr in selected_registry]
    return None
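Usage sketch; the category/namespace/name values below are illustrative, not real registry entries:

classes = find_in_registry(category='inputs', namespace='myapp', name='csv')
if classes is not None:
    handler = classes[0]()   # instantiate the first matching class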
def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None):
    """
    Expects a DOS object and finds the gap

    Args:
        tol: tolerance in occupations for determining the gap
        abs_tol: Set to True for an absolute tolerance and False for a
            relative one.
        spin: Possible values are None - finds the gap in the summed
            densities, Up - finds the gap in the up spin channel,
            Down - finds the gap in the down spin channel.

    Returns:
        (gap, cbm, vbm): Tuple of floats in eV corresponding to the gap,
        cbm and vbm.
    """
    tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1)
    if not abs_tol:
        tol = tol * tdos.sum() / tdos.shape[0]
    energies = self.x
    below_fermi = [i for i in range(len(energies))
                   if energies[i] < self.efermi and tdos[i] > tol]
    above_fermi = [i for i in range(len(energies))
                   if energies[i] > self.efermi and tdos[i] > tol]
    vbm_start = max(below_fermi)
    cbm_start = min(above_fermi)
    if vbm_start == cbm_start:
        return 0.0, self.efermi, self.efermi
    else:
        # Interpolate between adjacent values
        terminal_dens = tdos[vbm_start:vbm_start + 2][::-1]
        terminal_energies = energies[vbm_start:vbm_start + 2][::-1]
        start = get_linear_interpolated_value(terminal_dens,
                                              terminal_energies, tol)
        terminal_dens = tdos[cbm_start - 1:cbm_start + 1]
        terminal_energies = energies[cbm_start - 1:cbm_start + 1]
        end = get_linear_interpolated_value(terminal_dens,
                                            terminal_energies, tol)
        return end - start, end, start
def commit(self):
    """Commit a batch."""
    assert self.batch is not None, "No active batch, call start() first"
    logger.debug("Committing batch from %d sources...", len(self.batch))

    # Determine item priority.
    by_priority = []
    for name in self.batch.keys():
        priority = self.priorities.get(name, self.default_priority)
        by_priority.append((priority, name))

    for priority, name in sorted(by_priority, key=lambda key: key[0]):
        logger.debug("Processing items from '%s' (priority=%d)...",
                     name, priority)
        items = self.batch[name]
        for handlers in items.values():
            for agg, handler in handlers:
                try:
                    if agg is None:
                        handler()
                    else:
                        handler(agg)
                except Exception as error:
                    # Log errors and proceed to evaluate the next handler.
                    logger.exception("Error while invoking handler.")

    self.batch = None
    logger.debug("Batch committed.")
def list(self, all_pages=False, **kwargs):
    """Return a list of notification templates.

    Note here configuration-related fields like 'notification_configuration'
    and 'channels' will not be used even provided.

    If one or more filters are provided through keyword arguments, filter
    the results accordingly. If no filters are provided, return all results.

    =====API DOCS=====
    Retrieve a list of objects.

    :param all_pages: Flag that if set, collect all pages of content from the API when returning results.
    :type all_pages: bool
    :param page: The page to show. Ignored if all_pages is set.
    :type page: int
    :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.
    :type query: list
    :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
    :returns: A JSON object containing details of all resource objects returned by Tower backend.
    :rtype: dict

    =====API DOCS=====
    """
    self._separate(kwargs)
    return super(Resource, self).list(all_pages=all_pages, **kwargs)
def inheritance_patch(attrs):
    """Patch tango objects before they are processed by the metaclass."""
    for key, obj in attrs.items():
        if isinstance(obj, attribute):
            if getattr(obj, 'attr_write', None) == AttrWriteType.READ_WRITE:
                if not getattr(obj, 'fset', None):
                    method_name = obj.write_method_name or "write_" + key
                    obj.fset = attrs.get(method_name)
def inherit_kwargs(inherit_func):
    """
    TODO move to util_decor
    inherit_func = inspect_pdfs
    func = encoder.visualize.im_func
    """
    import utool as ut
    keys, is_arbitrary = ut.get_kwargs(inherit_func)
    if is_arbitrary:
        keys += ['**kwargs']
    kwargs_append = '\n'.join(keys)
    #from six.moves import builtins
    #builtins.print(kwargs_block)

    def _wrp(func):
        if func.__doc__ is None:
            func.__doc__ = ''
        # TODO append to kwargs block if it exists
        kwargs_block = 'Kwargs:\n' + ut.indent(kwargs_append)
        func.__doc__ += kwargs_block
        return func
    return _wrp
def get_vcs_details_output_vcs_details_node_vcs_mode(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vcs_details = ET.Element("get_vcs_details")
    config = get_vcs_details
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    node_vcs_mode = ET.SubElement(vcs_details, "node-vcs-mode")
    node_vcs_mode.text = kwargs.pop('node_vcs_mode')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def bug(self, container: Container) -> Bug:
    """
    Returns a description of the bug inside a given container.
    """
    name = container.bug
    return self.__installation.bugs[name]
def debug(sequence):
    """
    adds information to the sequence for better debugging, currently
    only an index property on each point in the sequence.
    """
    points = []
    for i, p in enumerate(sequence):
        copy = Point(p)
        copy['index'] = i
        points.append(copy)
    return sequence.__class__(points)
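Usage sketch, assuming `path` is some Point sequence from this library (hypothetical):

annotated = debug(path)
print(annotated[0]['index'])   # 0 -- each point now carries its position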
def get_device_info(self, bigip):
    '''Get device information about a specific BigIP device.

    :param bigip: bigip object --- device to inspect
    :returns: bigip object
    '''
    coll = bigip.tm.cm.devices.get_collection()
    device = [device for device in coll if device.selfDevice == 'true']
    assert len(device) == 1
    return device[0]
def actions(connection):
    """List all actions."""
    session = _make_session(connection=connection)
    for action in Action.ls(session=session):
        click.echo(f'{action.created} {action.action} {action.resource}')
def asini(b, orbit, solve_for=None):
    """
    Create a constraint for asini in an orbit.

    If any of the required parameters ('asini', 'sma', 'incl') do not
    exist in the orbit, they will be created.

    :parameter b: the :class:`phoebe.frontend.bundle.Bundle`
    :parameter str orbit: the label of the orbit in which this constraint
        should be built
    :parameter str solve_for: if 'asini' should not be the derived/constrained
        parameter, provide which other parameter should be derived
        (ie 'sma' or 'incl')
    :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of
        arguments that were passed to this function)
    """
    orbit_ps = _get_system_ps(b, orbit)

    # We want to get the parameters in THIS orbit, but calling through
    # the bundle in case we need to create it.
    # To do that, we need to know the search parameters to get items from this PS.
    metawargs = orbit_ps.meta
    metawargs.pop('qualifier')

    # Now we'll define the parameters in case they don't exist and need to be created
    sma_def = FloatParameter(qualifier='sma', value=8.0,
                             default_unit=u.solRad,
                             description='Semi major axis')
    incl_def = FloatParameter(qualifier='incl', value=90.0,
                              default_unit=u.deg,
                              description='Orbital inclination angle')
    asini_def = FloatParameter(qualifier='asini', value=8.0,
                               default_unit=u.solRad,
                               description='Projected semi major axis')

    # And now call get_or_create on the bundle
    sma, created = b.get_or_create('sma', sma_def, **metawargs)
    incl, created = b.get_or_create('incl', incl_def, **metawargs)
    asini, created = b.get_or_create('asini', asini_def, **metawargs)

    if solve_for in [None, asini]:
        lhs = asini
        rhs = sma * sin(incl)
    elif solve_for == sma:
        lhs = sma
        rhs = asini / sin(incl)
    elif solve_for == incl:
        lhs = incl
        rhs = arcsin(asini/sma)
    else:
        raise NotImplementedError

    #- return lhs, rhs, args_as_pss
    return lhs, rhs, {'orbit': orbit}
def ControlFromHandle(handle: int) -> Control:
    """
    Call IUIAutomation.ElementFromHandle with a native handle.
    handle: int, a native window handle.
    Return `Control` subclass.
    """
    return Control.CreateControlFromElement(
        _AutomationClient.instance().IUIAutomation.ElementFromHandle(handle))
def get(self, request, **resources):
    """ Default GET method. Return instance (collection) by model.

    :return object: instance or collection from self model
    """
    instance = resources.get(self._meta.name)
    if instance is not None:
        return instance

    return self.paginate(
        request, self.get_collection(request, **resources))
def imslic(img, n_segments=100, aspect=None):
    """
    slic args:
        n_segments=100, compactness=10., max_iter=10, sigma=0, spacing=None,
        multichannel=True, convert2lab=None, enforce_connectivity=True,
        min_size_factor=0.5, max_size_factor=3, slic_zero=False
    mark_boundaries args:
        label_img, color=(1, 1, 0), outline_color=None, mode='outer',
        background_label=0
    imshow args:
        cmap=None, norm=None, aspect=None, interpolation=None, alpha=None,
        vmin=None, vmax=None, origin=None, extent=None, shape=None,
        filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None,
        hold=None, data=None

    :param img:
    :param slicarg:
    :param slickw:
    :return:
    """
    from skimage.segmentation import (slic, mark_boundaries)
    from skimage.morphology import (dilation)

    if img.ndim == 2 or img.ndim == 3 and img.shape[-1] == 1:
        imz = np.stack([img, img, img], 2)
    else:
        imz = img

    slics = slic(imz, n_segments=n_segments)
    boundaries = mark_boundaries(imz, slics)
    return plt.imshow(boundaries, aspect=aspect)
def set_to_cache(self):
    """
    Add widget's attributes to Django's cache.

    Split the QuerySet, to not pickle the result set.
    """
    queryset = self.get_queryset()
    cache.set(self._get_cache_key(), {
        'queryset': [
            queryset.none(),
            queryset.query,
        ],
        'cls': self.__class__,
        'search_fields': tuple(self.search_fields),
        'max_results': int(self.max_results),
        'url': str(self.get_url()),
        'dependent_fields': dict(self.dependent_fields),
    })
def parse_file(path, format=None, encoding='utf-8', force_types=True):
    """A convenience wrapper of parse, which accepts path of file to parse.

    Args:
        path: path to file to parse
        format: explicitly override the guessed `inp` markup format
        encoding: file encoding, defaults to utf-8
        force_types:
            if `True`, integers, floats, booleans and none/null
                are recognized and returned as proper types instead of strings;
            if `False`, everything is converted to strings
            if `None`, backend return value is used
    Returns:
        parsed `inp` (dict or list) containing unicode values
    Raises:
        AnyMarkupError if a problem occurs while parsing
    """
    try:
        with open(path, 'rb') as f:
            return parse(f, format, encoding, force_types)
    except EnvironmentError as e:
        raise AnyMarkupError(e, traceback.format_exc())
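Usage sketch with invented file names; the markup format is guessed from the file unless overridden:

config = parse_file('settings.yaml')               # guessed as YAML
legacy = parse_file('legacy.conf', format='ini',
                    force_types=False)             # keep raw strings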
def experiment_completed(self):
    """Checks the current state of the experiment to see whether it has
    completed. This makes use of the experiment server `/summary` route,
    which in turn uses :meth:`~Experiment.is_complete`.
    """
    heroku_app = HerokuApp(self.app_id)
    status_url = "{}/summary".format(heroku_app.url)
    data = {}
    try:
        resp = requests.get(status_url)
        data = resp.json()
    except (ValueError, requests.exceptions.RequestException):
        logger.exception("Error fetching experiment status.")
    logger.debug("Current application state: {}".format(data))
    return data.get("completed", False)
def is_disabled_action(view):
    """
    Checks whether Link action is disabled.
    """
    if not isinstance(view, core_views.ActionsViewSet):
        return False
    action = getattr(view, 'action', None)
    return action in view.disabled_actions if action is not None else False
def recv(self):
    """Returns the reply message or None if there was no reply."""
    try:
        items = self.poller.poll(self.timeout)
    except KeyboardInterrupt:
        return  # interrupted

    if items:
        # if we got a reply, process it
        msg = self.client.recv_multipart()
        self.close()
        if self.verbose:
            logging.info("I: received reply:")
            dump(msg)

        # Don't try to handle errors, just assert noisily
        assert len(msg) >= 4

        # first frame is the empty delimiter, so drop it
        header = msg.pop(0)

        header = msg.pop(0)
        assert MDP.C_CLIENT == header

        # this one contains the service name
        # TODO: exploit this
        header = msg.pop(0)

        return msg
    else:
        logging.warn("W: permanent error, abandoning request")
def _parse_results(self, raw_results, includes_qualifiers):
    """
    Parse WMI query results in a more comprehensive form.

    Returns: List of WMI objects
    ```
    [
        {
            'freemegabytes': 19742.0,
            'name': 'C:',
            'avgdiskbytesperwrite': 1536.0
        },
        {
            'freemegabytes': 19742.0,
            'name': 'D:',
            'avgdiskbytesperwrite': 1536.0
        }
    ]
    ```
    """
    results = []
    for res in raw_results:
        # Ensure all properties are available. Use case-insensitivity
        # because some properties are returned with different cases.
        item = CaseInsensitiveDict()
        for prop_name in self.property_names:
            item[prop_name] = None

        for wmi_property in res.Properties_:
            # IMPORTANT: To improve performance, only access the Qualifiers
            # if the "CounterType" hasn't already been cached.
            should_get_qualifier_type = (
                includes_qualifiers and
                wmi_property.Name not in self._property_counter_types
            )

            if should_get_qualifier_type:
                # Can't index into "Qualifiers_" for keys that don't exist
                # without getting an exception.
                qualifiers = dict((q.Name, q.Value) for q in wmi_property.Qualifiers_)

                # Some properties like "Name" and "Timestamp_Sys100NS" do
                # not have a "CounterType" (since they're not a counter).
                # Therefore, they're ignored.
                if "CounterType" in qualifiers:
                    counter_type = qualifiers["CounterType"]
                    self._property_counter_types[wmi_property.Name] = counter_type

                    self.logger.debug(
                        u"Caching property qualifier CounterType: "
                        "{class_name}.{property_names} = {counter_type}".format(
                            class_name=self.class_name,
                            property_names=wmi_property.Name,
                            counter_type=counter_type
                        )
                    )
                else:
                    self.logger.debug(
                        u"CounterType qualifier not found for {class_name}.{property_names}".format(
                            class_name=self.class_name,
                            property_names=wmi_property.Name
                        )
                    )

            try:
                item[wmi_property.Name] = float(wmi_property.Value)
            except (TypeError, ValueError):
                item[wmi_property.Name] = wmi_property.Value

        results.append(item)
    return results
def select_rows(self, rows):
    ''' Truncate internal arrays to keep only the specified rows.

    Args:
        rows (array): An integer or boolean array identifying the indices
            of rows to keep.
    '''
    self.values = self.values.iloc[rows]
    self.index = self.index.iloc[rows, :]
    for prop in self._property_columns:
        vals = getattr(self, prop)[rows]
        setattr(self, prop, vals)
def atomic_to_cim_xml(obj):
    """
    Convert an "atomic" scalar value to a CIM-XML string and return that
    string.

    The returned CIM-XML string is ready for use as the text of a CIM-XML
    'VALUE' element.

    Parameters:

      obj (:term:`CIM data type`, :term:`number`, :class:`py:datetime`):
        The "atomic" input value. May be `None`.
        Must not be an array/list/tuple. Must not be a :ref:`CIM object`.

    Returns:

        A :term:`unicode string` object in CIM-XML value format representing
        the input value. `None`, if the input value is `None`.

    Raises:
        TypeError
    """
    if obj is None:  # pylint: disable=no-else-return
        return obj
    elif isinstance(obj, six.text_type):
        return obj
    elif isinstance(obj, six.binary_type):
        return _to_unicode(obj)
    elif isinstance(obj, bool):
        return u'TRUE' if obj else u'FALSE'
    elif isinstance(obj, (CIMInt, six.integer_types, CIMDateTime)):
        return six.text_type(obj)
    elif isinstance(obj, datetime):
        return six.text_type(CIMDateTime(obj))
    elif isinstance(obj, Real32):
        # DSP0201 requirements for representing real32:
        # The significand must be represented with at least 11 digits.
        # The special values must have the case: INF, -INF, NaN.
        s = u'{0:.11G}'.format(obj)
        if s == 'NAN':
            s = u'NaN'
        elif s in ('INF', '-INF'):
            pass
        elif '.' not in s:
            parts = s.split('E')
            parts[0] = parts[0] + '.0'
            s = 'E'.join(parts)
        return s
    elif isinstance(obj, (Real64, float)):
        # DSP0201 requirements for representing real64:
        # The significand must be represented with at least 17 digits.
        # The special values must have the case: INF, -INF, NaN.
        s = u'{0:.17G}'.format(obj)
        if s == 'NAN':
            s = u'NaN'
        elif s in ('INF', '-INF'):
            pass
        elif '.' not in s:
            parts = s.split('E')
            parts[0] = parts[0] + '.0'
            s = 'E'.join(parts)
        return s
    else:
        raise TypeError(
            _format("Value {0!A} has invalid type {1} for conversion to a "
                    "CIM-XML string", obj, type(obj)))
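Illustrative inputs and outputs, assuming pywbem's CIM types are in scope:

from pywbem import Real32, Uint16

atomic_to_cim_xml(None)           # -> None
atomic_to_cim_xml(True)           # -> u'TRUE'
atomic_to_cim_xml(Uint16(42))     # -> u'42'
atomic_to_cim_xml(Real32(1.0))    # -> u'1.0' (decimal point is forced in)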
Convert an "atomic" scalar value to a CIM-XML string and return that string. The returned CIM-XML string is ready for use as the text of a CIM-XML 'VALUE' element. Parameters: obj (:term:`CIM data type`, :term:`number`, :class:`py:datetime`): The "atomic" input value. May be `None`. Must not be an array/list/tuple. Must not be a :ref:`CIM object`. Returns: A :term:`unicode string` object in CIM-XML value format representing the input value. `None`, if the input value is `None`. Raises: TypeError
def get_season_player_stats(self, season_key, player_key):
    """
    Calling Season Player Stats API.

    Arg:
        season_key: key of the season
        player_key: key of the player

    Return:
        json data
    """
    season_player_stats_url = self.api_path + "season/" + season_key + \
        "/player/" + player_key + "/stats/"
    response = self.get_response(season_player_stats_url)
    return response
def _drop_schema(self, force_drop=False):
    """ Drops the schema"""
    connection = connections[get_tenant_database_alias()]
    has_schema = hasattr(connection, 'schema_name')
    if has_schema and connection.schema_name not in (self.schema_name,
                                                     get_public_schema_name()):
        raise Exception("Can't delete tenant outside its own schema or "
                        "the public schema. Current schema is %s."
                        % connection.schema_name)

    if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
        self.pre_drop()
        cursor = connection.cursor()
        cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
def predict_moments(self, X):
    """
    Full predictive distribution from Bayesian linear regression.

    Parameters
    ----------
    X : ndarray
        (N*,d) array query input dataset (N* samples, d dimensions).

    Returns
    -------
    Ey : ndarray
        The expected value of y* for the query inputs, X* of shape (N*,).
    Vy : ndarray
        The expected variance of y* for the query inputs, X* of shape (N*,).
    """
    check_is_fitted(self, ['var_', 'regularizer_', 'weights_',
                           'covariance_', 'hypers_'])
    X = check_array(X)
    Phi = self.basis.transform(X, *atleast_list(self.hypers_))
    Ey = Phi.dot(self.weights_)
    Vf = (Phi.dot(self.covariance_) * Phi).sum(axis=1)

    return Ey, Vf + self.var_
def add_ecc_cgw(psr, gwtheta, gwphi, mc, dist, F, inc, psi, gamma0, e0, l0,
                q, nmax=100, nset=None, pd=None, periEv=True, psrTerm=True,
                tref=0, check=True, useFile=True):
    """
    Simulate GW from eccentric SMBHB. Waveform models from Taylor et al. (2015)
    and Barack and Cutler (2004).

    WARNING: This residual waveform is only accurate if the GW frequency is
    not significantly evolving over the observation time of the pulsar.

    :param psr: pulsar object
    :param gwtheta: Polar angle of GW source in celestial coords [radians]
    :param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    :param mc: Chirp mass of SMBHB [solar masses]
    :param dist: Luminosity distance to SMBHB [Mpc]
    :param F: Orbital frequency of SMBHB [Hz]
    :param inc: Inclination of GW source [radians]
    :param psi: Polarization of GW source [radians]
    :param gamma0: Initial angle of periastron [radians]
    :param e0: Initial eccentricity of SMBHB
    :param l0: Initial mean anomaly [radians]
    :param q: Mass ratio of SMBHB
    :param nmax: Number of harmonics to use in waveform decomposition
    :param nset: Fix the number of harmonics to be injected
    :param pd: Pulsar distance [kpc]
    :param periEv: Evolve the position of periapsis [boolean]
    :param psrTerm: Option to include pulsar term [boolean]
    :param tref: Fiducial time at which initial parameters are referenced [s]
    :param check: Check if frequency evolves significantly over obs. time
    :param useFile: Use pre-computed table of number of harmonics vs eccentricity

    :returns: Vector of induced residuals
    """
    # define variable for later use
    cosgwtheta, cosgwphi = N.cos(gwtheta), N.cos(gwphi)
    singwtheta, singwphi = N.sin(gwtheta), N.sin(gwphi)
    sin2psi, cos2psi = N.sin(2*psi), N.cos(2*psi)

    # unit vectors to GW source
    m = N.array([singwphi, -cosgwphi, 0.0])
    n = N.array([-cosgwtheta*cosgwphi, -cosgwtheta*singwphi, singwtheta])
    omhat = N.array([-singwtheta*cosgwphi, -singwtheta*singwphi, -cosgwtheta])

    # pulsar location (fixed: original tested truthiness of the bare string
    # 'RAJ'/'ELONG' instead of membership of both keys)
    if 'RAJ' in psr.pars() and 'DECJ' in psr.pars():
        ptheta = N.pi/2 - psr['DECJ'].val
        pphi = psr['RAJ'].val
    elif 'ELONG' in psr.pars() and 'ELAT' in psr.pars():
        fac = 180./N.pi
        coords = ephem.Equatorial(ephem.Ecliptic(str(psr['ELONG'].val*fac),
                                                 str(psr['ELAT'].val*fac)))
        ptheta = N.pi/2 - float(repr(coords.dec))
        pphi = float(repr(coords.ra))

    # use definition from Sesana et al 2010 and Ellis et al 2012
    phat = N.array([N.sin(ptheta)*N.cos(pphi), N.sin(ptheta)*N.sin(pphi),
                    N.cos(ptheta)])

    fplus = 0.5 * (N.dot(m, phat)**2 - N.dot(n, phat)**2) / (1+N.dot(omhat, phat))
    fcross = (N.dot(m, phat)*N.dot(n, phat)) / (1 + N.dot(omhat, phat))
    cosMu = -N.dot(omhat, phat)

    # get values from pulsar object
    toas = N.double(psr.toas())*86400 - tref

    if check:
        # check that frequency is not evolving significantly over obs. time
        y = eu.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc, q,
                                          N.array([0.0, toas.max()]))

        # initial and final values over observation time
        Fc0, ec0, gc0, phic0 = y[0,:]
        Fc1, ec1, gc1, phic1 = y[-1,:]

        # observation time
        Tobs = 1/(toas.max()-toas.min())

        if N.abs(Fc0-Fc1) > 1/Tobs:
            print('WARNING: Frequency is evolving over more than one frequency bin.')
            print('F0 = {0}, F1 = {1}, delta f = {2}'.format(Fc0, Fc1, 1/Tobs))

    # get gammadot for earth term
    if periEv == False:
        gammadot = 0.0
    else:
        gammadot = eu.get_gammadot(F, mc, q, e0)

    if nset is not None:
        nharm = nset
    elif useFile:
        if e0 > 0.001 and e0 < 0.999:
            nharm = min(int(ecc_interp(e0)), nmax) + 1
        elif e0 < 0.001:
            nharm = 3
        else:
            nharm = nmax
    else:
        nharm = nmax

    ##### earth term #####
    splus, scross = eu.calculate_splus_scross(nharm, mc, dist, F, e0, toas,
                                              l0, gamma0, gammadot, inc)

    ##### pulsar term #####
    if psrTerm:
        # convert units
        pd *= eu.KPC2S   # convert from kpc to seconds

        # get pulsar time
        tp = toas - pd * (1-cosMu)

        # solve coupled system of equations to get pulsar term values
        y = eu.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc, q,
                                          N.array([0.0, tp.min()]))

        # get pulsar term values
        if N.any(y):
            Fp, ep, gp, lp = y[-1,:]

            # get gammadot at pulsar term
            gammadotp = eu.get_gammadot(Fp, mc, q, ep)

            if useFile:
                if ep > 0.001 and ep < 0.999:
                    nharm = min(int(ecc_interp(ep)), nmax)
                elif ep < 0.001:
                    nharm = 3
                else:
                    nharm = nmax
            else:
                nharm = nmax

            splusp, scrossp = eu.calculate_splus_scross(nharm, mc, dist, Fp,
                                                        ep, toas, lp, gp,
                                                        gammadotp, inc)

            rr = (fplus*cos2psi - fcross*sin2psi) * (splusp - splus) + \
                 (fplus*sin2psi + fcross*cos2psi) * (scrossp - scross)
        else:
            rr = N.zeros(len(toas))  # fixed: original referenced undefined `p.toas`
    else:
        rr = - (fplus*cos2psi - fcross*sin2psi) * splus - \
             (fplus*sin2psi + fcross*cos2psi) * scross

    psr.stoas[:] += rr/86400
def jsonify_payload(self):
    """ Dump the payload to JSON """
    # Assume already json serialized
    if isinstance(self.payload, string_types):
        return self.payload
    return json.dumps(self.payload, cls=StandardJSONEncoder)
def _get_client(self):
    """
    OSS2 Auth client

    Returns:
        oss2.Auth or oss2.StsAuth: client
    """
    return (_oss.StsAuth if 'security_token' in self._storage_parameters else
            _oss.Auth if self._storage_parameters else
            _oss.AnonymousAuth)(**self._storage_parameters)
def pool_info(name=None, **kwargs):
    '''
    Return information on a storage pool provided its name.

    :param name: libvirt storage pool name
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    If no name is provided, return the info for all defined storage pools.

    .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.pool_info default
    '''
    result = {}
    conn = __get_conn(**kwargs)

    def _pool_extract_infos(pool):
        '''
        Format the pool info dictionary

        :param pool: the libvirt pool object
        '''
        states = ['inactive', 'building', 'running', 'degraded', 'inaccessible']
        infos = pool.info()
        state = states[infos[0]] if infos[0] < len(states) else 'unknown'
        desc = ElementTree.fromstring(pool.XMLDesc())
        path_node = desc.find('target/path')
        return {
            'uuid': pool.UUIDString(),
            'state': state,
            'capacity': infos[1],
            'allocation': infos[2],
            'free': infos[3],
            'autostart': pool.autostart(),
            'persistent': pool.isPersistent(),
            'target_path': path_node.text if path_node is not None else None,
            'type': desc.get('type')
        }

    try:
        pools = [pool for pool in conn.listAllStoragePools()
                 if name is None or pool.name() == name]
        result = {pool.name(): _pool_extract_infos(pool) for pool in pools}
    except libvirt.libvirtError as err:
        log.debug('Silenced libvirt error: %s', str(err))
    finally:
        conn.close()
    return result
def return_buffer_contents(self, frame, force_unescaped=False):
    """Return the buffer contents of the frame."""
    if not force_unescaped:
        if frame.eval_ctx.volatile:
            self.writeline('if context.eval_ctx.autoescape:')
            self.indent()
            self.writeline('return Markup(concat(%s))' % frame.buffer)
            self.outdent()
            self.writeline('else:')
            self.indent()
            self.writeline('return concat(%s)' % frame.buffer)
            self.outdent()
            return
        elif frame.eval_ctx.autoescape:
            self.writeline('return Markup(concat(%s))' % frame.buffer)
            return
    self.writeline('return concat(%s)' % frame.buffer)
def _encode_secret_part_v2_v3(version, condition, root_key, ns):
    '''Creates a version 2 or version 3 secret part of the third party
    caveat. The returned data is not encrypted.

    The format has the following packed binary fields:
        version 2 or 3 [1 byte]
        root key length [n: uvarint]
        root key [n bytes]
        namespace length [n: uvarint] (v3 only)
        namespace [n bytes] (v3 only)
        predicate [rest of message]
    '''
    data = bytearray()
    data.append(version)
    encode_uvarint(len(root_key), data)
    data.extend(root_key)
    if version >= VERSION_3:
        encode_uvarint(len(ns), data)
        data.extend(ns)
    data.extend(condition.encode('utf-8'))
    return bytes(data)
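A worked byte layout for the v2 case, assuming encode_uvarint emits standard LEB128-style varints (so a length of 1 is the single byte 0x01):

part = _encode_secret_part_v2_v3(2, 'x', b'k', None)
assert part == b'\x02\x01kx'   # version, uvarint key length, key, predicate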
def ciphertext(self, be_secure=True):
    """Return the ciphertext of the EncryptedNumber.

    Choosing a random number is slow. Therefore, methods like
    :meth:`__add__` and :meth:`__mul__` take a shortcut and do not
    follow Paillier encryption fully - every encrypted sum or product
    should be multiplied by r ** :attr:`~PaillierPublicKey.n` for random
    r < n (i.e., the result is obfuscated). Not obfuscating provides a
    big speed up in, e.g., an encrypted dot product: each of the product
    terms need not be obfuscated, since only the final sum is shared
    with others - only this final sum needs to be obfuscated.

    Not obfuscating is OK for internal use, where you are happy for
    your own computer to know the scalars you've been adding and
    multiplying to the original ciphertext. But this is *not* OK if
    you're going to be sharing the new ciphertext with anyone else.

    So, by default, this method returns an obfuscated ciphertext -
    obfuscating it if necessary. If instead you set `be_secure=False`
    then the ciphertext will be returned, regardless of whether it has
    already been obfuscated. We thought that this approach, while a
    little awkward, yields a safe default while preserving the option
    for high performance.

    Args:
      be_secure (bool): If any untrusted parties will see the returned
        ciphertext, then this should be True.

    Returns:
      an int, the ciphertext. If `be_secure=False` then it might be
        possible for attackers to deduce numbers involved in
        calculating this ciphertext.
    """
    if be_secure and not self.__is_obfuscated:
        self.obfuscate()
    return self.__ciphertext
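Usage sketch with the python-paillier (phe) API this method belongs to:

from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()
enc = public_key.encrypt(3.14)
safe_to_share = enc.ciphertext()                  # obfuscated (default)
internal_only = enc.ciphertext(be_secure=False)   # faster; keep it local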
Return the ciphertext of the EncryptedNumber. Choosing a random number is slow. Therefore, methods like :meth:`__add__` and :meth:`__mul__` take a shortcut and do not follow Paillier encryption fully - every encrypted sum or product should be multiplied by r ** :attr:`~PaillierPublicKey.n` for random r < n (i.e., the result is obfuscated). Not obfuscating provides a big speed up in, e.g., an encrypted dot product: each of the product terms need not be obfuscated, since only the final sum is shared with others - only this final sum needs to be obfuscated. Not obfuscating is OK for internal use, where you are happy for your own computer to know the scalars you've been adding and multiplying to the original ciphertext. But this is *not* OK if you're going to be sharing the new ciphertext with anyone else. So, by default, this method returns an obfuscated ciphertext - obfuscating it if necessary. If instead you set `be_secure=False` then the ciphertext will be returned, regardless of whether it has already been obfuscated. We thought that this approach, while a little awkward, yields a safe default while preserving the option for high performance. Args: be_secure (bool): If any untrusted parties will see the returned ciphertext, then this should be True. Returns: an int, the ciphertext. If `be_secure=False` then it might be possible for attackers to deduce numbers involved in calculating this ciphertext.
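A hedged usage sketch, assuming the python-paillier (`phe`) package that this method appears to belong to (install with `pip install phe`); the sum of two encryptions is initially unobfuscated, so the secure accessor obfuscates before returning:

from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()
enc = public_key.encrypt(5) + public_key.encrypt(7)   # unobfuscated sum
safe = enc.ciphertext()                 # be_secure=True: obfuscates first
fast = enc.ciphertext(be_secure=False)  # raw value, internal use only
assert private_key.decrypt(enc) == 12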
def make_site_obj(argdict):
    '''Instantiate and return the site. This will be used for all commands'''
    d = os.getcwd()
    if 'directory' in argdict:
        d = argdict['directory']
    try:
        s = s2site.Site(d)
    except Exception:
        print("Could not instantiate site object.")
        sys.exit()
    return s
Instantiate and return the site. This will be used for all commands
def fromstring(cls, dis_string):
    """Create a DisRSTTree instance from a string containing a *.dis parse."""
    # Open in text mode so the parse string can be written under Python 3.
    temp = tempfile.NamedTemporaryFile(mode='w', delete=False)
    temp.write(dis_string)
    temp.close()
    dis_tree = cls(dis_filepath=temp.name)
    os.unlink(temp.name)
    return dis_tree
Create a DisRSTTree instance from a string containing a *.dis parse.
def RACCU_calc(TOP, P, POP): """ Calculate RACCU (Random accuracy unbiased). :param TOP: test outcome positive :type TOP : int :param P: condition positive :type P : int :param POP: population :type POP : int :return: RACCU as float """ try: result = ((TOP + P) / (2 * POP))**2 return result except Exception: return "None"
Calculate RACCU (Random accuracy unbiased). :param TOP: test outcome positive :type TOP : int :param P: condition positive :type P : int :param POP: population :type POP : int :return: RACCU as float
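A quick worked instance of the formula above: with TOP=30, P=25 and POP=100, RACCU = ((30 + 25) / (2 * 100)) ** 2 = 0.275 ** 2 = 0.075625.

print(RACCU_calc(30, 25, 100))  # 0.075625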
def remove_fetcher(self, fetcher): """Remove a running fetcher from the list of active fetchers. :Parameters: - `fetcher`: fetcher instance. :Types: - `fetcher`: `CacheFetcher`""" self._lock.acquire() try: for t, f in list(self._active_fetchers): if f is fetcher: self._active_fetchers.remove((t, f)) f._deactivated() return finally: self._lock.release()
Remove a running fetcher from the list of active fetchers. :Parameters: - `fetcher`: fetcher instance. :Types: - `fetcher`: `CacheFetcher`
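The acquire/try/finally/release pattern above is exactly what the `with` statement encapsulates; a minimal equivalent sketch, assuming `self._lock` is a standard `threading.Lock` or `RLock` (both support the context-manager protocol):

def remove_fetcher(self, fetcher):
    with self._lock:
        for t, f in list(self._active_fetchers):
            if f is fetcher:
                self._active_fetchers.remove((t, f))
                f._deactivated()
                return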
def close(self, suppress_logging=False):
    """ Purges all connections. The method closes the AMQP connection (disconnects) """
    for publisher in self.publishers:
        try:
            publisher.close()
        except Exception as e:
            self.logger.error('Exception on closing Flopsy Publisher {0}: {1}'.format(self.name, e),
                              exc_info=not suppress_logging)
    self.publishers.clear()
Purges all connections. The method closes the AMQP connection (disconnects)
def handle_key_rotate(self, now):
    '''
    Rotate the AES key
    '''
    to_rotate = False
    dfn = os.path.join(self.opts['cachedir'], '.dfn')
    try:
        stats = os.stat(dfn)
        # Basic Windows permissions don't distinguish between
        # user/group/all. Check for read-only state instead.
        if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
            to_rotate = True
            # Cannot delete read-only files on Windows.
            os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
        elif stats.st_mode == 0o100400:
            to_rotate = True
        else:
            log.error('Found dropfile with incorrect permissions, ignoring...')
        os.remove(dfn)
    except os.error:
        pass

    if self.opts.get('publish_session'):
        if now - self.rotate >= self.opts['publish_session']:
            to_rotate = True

    if to_rotate:
        log.info('Rotating master AES key')
        for secret_key, secret_map in six.iteritems(SMaster.secrets):
            # should be unnecessary-- since no one else should be modifying
            with secret_map['secret'].get_lock():
                secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
            self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
        self.rotate = now
        if self.opts.get('ping_on_rotate'):
            # Ping all minions to get them to pick up the new key
            log.debug('Pinging all connected minions '
                      'due to key rotation')
            salt.utils.master.ping_all_connected_minions(self.opts)
Rotate the AES key
def clone(cls, repo_location, repo_dir=None, branch_or_tag=None, temp=False): """Clone repo at repo_location into repo_dir and checkout branch_or_tag. Defaults into current working directory if repo_dir is not supplied. If 'temp' is True, a temporary directory will be created for you and the repository will be cloned into it. The tempdir is scheduled for deletion (when the process exits) through an exit function registered with the atexit module. If 'temp' is True, repo_dir is ignored. If branch_or_tag is not specified, the HEAD of the primary branch of the cloned repo is checked out. """ if temp: reponame = repo_location.rsplit('/', 1)[-1] suffix = '%s.temp_simpl_GitRepo' % '_'.join( [str(x) for x in (reponame, branch_or_tag) if x]) repo_dir = create_tempdir(suffix=suffix, delete=True) else: repo_dir = repo_dir or os.getcwd() git_clone(repo_dir, repo_location, branch_or_tag=branch_or_tag) # assuming no errors return cls(repo_dir)
Clone repo at repo_location into repo_dir and checkout branch_or_tag. Defaults into current working directory if repo_dir is not supplied. If 'temp' is True, a temporary directory will be created for you and the repository will be cloned into it. The tempdir is scheduled for deletion (when the process exits) through an exit function registered with the atexit module. If 'temp' is True, repo_dir is ignored. If branch_or_tag is not specified, the HEAD of the primary branch of the cloned repo is checked out.
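A hedged usage sketch; `GitRepo` stands in for whatever class this classmethod is defined on, and the URL and tag are illustrative only:

repo = GitRepo.clone('https://github.com/example/project.git',
                     branch_or_tag='v1.2.0', temp=True)
# The temporary checkout directory is deleted automatically at process exit.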
def trimquants(self, col: str, inf: float, sup: float): """ Remove superior and inferior quantiles from the dataframe :param col: column name :type col: str :param inf: inferior quantile :type inf: float :param sup: superior quantile :type sup: float :example: ``ds.trimquants("Col 1", 0.01, 0.99)`` """ try: self.df = self._trimquants(col, inf, sup) except Exception as e: self.err(e, self.trimquants, "Can not trim quantiles")
Remove superior and inferior quantiles from the dataframe :param col: column name :type col: str :param inf: inferior quantile :type inf: float :param sup: superior quantile :type sup: float :example: ``ds.trimquants("Col 1", 0.01, 0.99)``
def mine_get(tgt, fun, tgt_type='glob', opts=None):
    '''
    Gathers the data from the specified minions' mine, pass in the target,
    function to look up and the target type
    '''
    ret = {}
    serial = salt.payload.Serial(opts)
    checker = CkMinions(opts)
    _res = checker.check_minions(
            tgt,
            tgt_type)
    minions = _res['minions']
    cache = salt.cache.factory(opts)
    if isinstance(fun, six.string_types):
        functions = list(set(fun.split(',')))
        _ret_dict = len(functions) > 1
    elif isinstance(fun, list):
        functions = fun
        _ret_dict = True
    else:
        return {}
    for minion in minions:
        mdata = cache.fetch('minions/{0}'.format(minion), 'mine')
        if not isinstance(mdata, dict):
            continue
        if not _ret_dict and functions and functions[0] in mdata:
            # look up the single requested function; a list is not a valid dict key
            ret[minion] = mdata.get(functions[0])
        elif _ret_dict:
            for fun in functions:
                if fun in mdata:
                    ret.setdefault(fun, {})[minion] = mdata.get(fun)
    return ret
Gathers the data from the specified minions' mine, pass in the target, function to look up and the target type
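A hedged usage sketch; `opts` stands for a loaded master configuration dict, and the minion IDs and function names are illustrative:

addrs = mine_get('web*', 'network.ip_addrs', tgt_type='glob', opts=opts)
# -> {'web1': [...], 'web2': [...]}

# Requesting several functions returns a dict keyed by function name instead:
multi = mine_get('web*', 'network.ip_addrs,grains.items', opts=opts)
# -> {'network.ip_addrs': {...}, 'grains.items': {...}}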
def GetElapsedMs(self): '''Retrieves the number of milliseconds that have passed in the virtual machine since it last started running on the server. The count of elapsed time restarts each time the virtual machine is powered on, resumed, or migrated using VMotion. This value counts milliseconds, regardless of whether the virtual machine is using processing power during that time. You can combine this value with the CPU time used by the virtual machine (VMGuestLib_GetCpuUsedMs) to estimate the effective virtual machine CPU speed. cpuUsedMs is a subset of this value.''' counter = c_uint64() ret = vmGuestLib.VMGuestLib_GetElapsedMs(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value
Retrieves the number of milliseconds that have passed in the virtual machine since it last started running on the server. The count of elapsed time restarts each time the virtual machine is powered on, resumed, or migrated using VMotion. This value counts milliseconds, regardless of whether the virtual machine is using processing power during that time. You can combine this value with the CPU time used by the virtual machine (VMGuestLib_GetCpuUsedMs) to estimate the effective virtual machine CPU speed. cpuUsedMs is a subset of this value.
def moments_XX(X, remove_mean=False, modify_data=False, weights=None,
               sparse_mode='auto', sparse_tol=0.0, column_selection=None,
               diag_only=False):
    r""" Computes the first two unnormalized moments of X

    Computes :math:`s = \sum_t x_t` and :math:`C = X^\top X` while exploiting
    zero or constant columns in the data matrix.

    Parameters
    ----------
    X : ndarray (T, M)
        Data matrix
    remove_mean : bool
        True: remove column mean from the data, False: don't remove mean.
    modify_data : bool
        If remove_mean=True, the mean will be removed in the data matrix X,
        without creating an independent copy. This option is faster but might
        lead to surprises because your input array is changed.
    weights: None or ndarray(T, )
        weights assigned to each trajectory point. If None, all data points
        have weight one. If ndarray, each data point is assigned a separate
        weight.
    sparse_mode : str
        one of:
            * 'dense' : always use dense mode
            * 'sparse' : always use sparse mode if possible
            * 'auto' : automatic
    sparse_tol: float
        Threshold for considering column to be zero in order to save computing
        effort when the data is sparse or almost sparse.
        If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y
        is not given) of the covariance matrix will be set to zero. If Y is
        given and max(abs(Y[:, i])) < sparse_tol, then column i of the
        covariance matrix will be set to zero.
    column_selection: ndarray(k, dtype=int) or None
        Indices of those columns that are to be computed. If None, all columns
        are computed.
    diag_only: bool
        If True, the computation is restricted to the diagonal entries
        (autocorrelations) only.

    Returns
    -------
    w : float
        statistical weight
    s : ndarray (M)
        sum
    C : ndarray (M, M)
        unnormalized covariance matrix

    """
    # Check consistency of inputs:
    if weights is not None:
        assert X.shape[0] == weights.shape[0], 'X and weights_x must have equal length'
    # diag_only is only implemented for dense mode
    if diag_only and sparse_mode != 'dense':
        if sparse_mode == 'sparse':
            import warnings
            warnings.warn('Computing diagonal entries only is not implemented for sparse mode. Switching to dense mode.')
        sparse_mode = 'dense'
    # sparsify
    X0, mask_X, xconst = _sparsify(X, remove_mean=remove_mean, modify_data=modify_data,
                                   sparse_mode=sparse_mode, sparse_tol=sparse_tol)
    is_sparse = mask_X is not None
    # copy / convert
    # TODO: do we need to copy xconst?
    X0, xconst = _copy_convert(X0, const=xconst, remove_mean=remove_mean,
                               copy=is_sparse or (remove_mean and not modify_data))
    # sum / center
    w, sx, sx0_centered = _sum(X0, xmask=mask_X, xconst=xconst, symmetric=False,
                               remove_mean=remove_mean, weights=weights)
    if remove_mean:
        _center(X0, w, sx, mask=mask_X, const=xconst, inplace=True)  # fast in-place centering
    # TODO: we could make a second const check here. If after summation not enough zeros have appeared in the
    # TODO: consts, we switch back to dense treatment here.
# compute covariance matrix if column_selection is not None: if is_sparse: Xk = X[:, column_selection] mask_Xk = mask_X[column_selection] X0k = Xk[:, mask_Xk] xksum = sx0_centered[column_selection] xkconst = Xk[0, ~mask_Xk] X0k, xkconst = _copy_convert(X0k, const=xkconst, remove_mean=remove_mean, copy=True) C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_Xk, xsum=sx0_centered, xconst=xconst, ysum=xksum, yconst=xkconst, weights=weights) else: X0k = X0[:, column_selection] C = _M2(X0, X0k, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst, ysum=sx0_centered[column_selection], yconst=xconst, weights=weights) else: C = _M2(X0, X0, mask_X=mask_X, mask_Y=mask_X, xsum=sx0_centered, xconst=xconst, ysum=sx0_centered, yconst=xconst, weights=weights, diag_only=diag_only) return w, sx, C
r""" Computes the first two unnormalized moments of X Computes :math:`s = \sum_t x_t` and :math:`C = X^\top X` while exploiting zero or constant columns in the data matrix. Parameters ---------- X : ndarray (T, M) Data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. weights: None or ndarray(T, ) weights assigned to each trajectory point. If None, all data points have weight one. If ndarray, each data point is assigned a separate weight. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic sparse_tol: float Threshold for considering column to be zero in order to save computing effort when the data is sparse or almost sparse. If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y is not given) of the covariance matrix will be set to zero. If Y is given and max(abs(Y[:, i])) < sparse_tol, then column i of the covariance matrix will be set to zero. column_selection: ndarray(k, dtype=int) or None Indices of those columns that are to be computed. If None, all columns are computed. diag_only: bool If True, the computation is restricted to the diagonal entries (autocorrelations) only. Returns ------- w : float statistical weight s : ndarray (M) sum C : ndarray (M, M) unnormalized covariance matrix
def _set_shape(self, shape): """Private on purpose.""" try: shape = (int(shape),) except TypeError: pass shp = list(shape) shp[0] = timetools.Period('366d')/self.simulationstep shp[0] = int(numpy.ceil(round(shp[0], 10))) getattr(self.fastaccess, self.name).ratios = numpy.zeros( shp, dtype=float)
Private on purpose.
def get_me(self): """Return a LoggedInRedditor object. Note: This function is only intended to be used with an 'identity' providing OAuth2 grant. """ response = self.request_json(self.config['me']) user = objects.Redditor(self, response['name'], response) user.__class__ = objects.LoggedInRedditor return user
Return a LoggedInRedditor object. Note: This function is only intended to be used with an 'identity' providing OAuth2 grant.
def colorize_text(self, text): """Colorize the text.""" # As originally implemented, this method acts upon all the contents of # the file as a single string using the MULTILINE option of the re # package. I believe this was ostensibly for performance reasons, but # it has a few side effects that are less than ideal. It's non-trivial # to avoid some substitutions based on other matches using this # technique, for example. In the case of block indents, e.g., backticks # that occur in the example ($ pwd is `pwd`) should not be escaped. # With the MULTILINE flag that is not simple. colorize_backticks() is # currently operating on a line by line basis and special casing for # this scenario. If these special cases proliferate, the line breaking # should occur here in order to minimize the number of iterations. result = text result = self.colorize_heading(result) result = self.colorize_block_indent(result) result = self.colorize_backticks(result) return result
Colorize the text.
def _values(self): """Getter for series values (flattened)""" if self.interpolate: return [ val[0] for serie in self.series for val in serie.interpolated ] else: return super(Line, self)._values
Getter for series values (flattened)
def process_event(self, event, ipmicmd, seldata): """Modify an event according with OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions. """ event['oem_handler'] = None evdata = event['event_data_bytes'] if evdata[0] & 0b11000000 == 0b10000000: event['oem_byte2'] = evdata[1] if evdata[0] & 0b110000 == 0b100000: event['oem_byte3'] = evdata[2]
Modify an event according with OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions.
def publish_topology_closed(self, topology_id): """Publish a TopologyClosedEvent to all topology listeners. :Parameters: - `topology_id`: A unique identifier for the topology this server is a part of. """ event = TopologyClosedEvent(topology_id) for subscriber in self.__topology_listeners: try: subscriber.closed(event) except Exception: _handle_exception()
Publish a TopologyClosedEvent to all topology listeners. :Parameters: - `topology_id`: A unique identifier for the topology this server is a part of.
def select_pane(self, target_pane): """ Return selected :class:`Pane` through ``$ tmux select-pane``. Parameters ---------- target_pane : str 'target_pane', '-U' ,'-D', '-L', '-R', or '-l'. Return ------ :class:`Pane` """ if target_pane in ['-l', '-U', '-D', '-L', '-R']: proc = self.cmd('select-pane', '-t%s' % self.id, target_pane) else: proc = self.cmd('select-pane', '-t%s' % target_pane) if proc.stderr: raise exc.LibTmuxException(proc.stderr) return self.attached_pane
Return selected :class:`Pane` through ``$ tmux select-pane``. Parameters ---------- target_pane : str 'target_pane', '-U' ,'-D', '-L', '-R', or '-l'. Return ------ :class:`Pane`
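A hedged usage sketch with libtmux, which this method appears to belong to; exact accessor names vary across libtmux versions:

import libtmux

server = libtmux.Server()
session = server.list_sessions()[0]
window = session.attached_window
pane = window.select_pane('-D')  # select the pane below the active one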
def company(random=random, *args, **kwargs): """ Produce a company name >>> mock_random.seed(0) >>> company(random=mock_random) 'faculty of applied chimp' >>> mock_random.seed(1) >>> company(random=mock_random) 'blistersecret studios' >>> mock_random.seed(2) >>> company(random=mock_random) 'pooppooppoop studios' >>> mock_random.seed(3) >>> company(random=mock_random) 'britchesshop' >>> mock_random.seed(4) >>> company(random=mock_random, capitalize=True) 'Mystery Studies Department' >>> mock_random.seed(5) >>> company(random=mock_random, slugify=True) 'the-law-offices-of-magnificentslap-boatbench-and-smellmouse' """ return random.choice([ "faculty of applied {noun}", "{noun}{second_noun} studios", "{noun}{noun}{noun} studios", "{noun}shop", "{noun} studies department", "the law offices of {lastname}, {noun}, and {other_lastname}", "{country} ministry of {plural}", "{city} municipal {noun} department", "{city} plumbing", "department of {noun} studies", "{noun} management systems", "{plural} r us", "inter{verb}", "the {noun} warehouse", "integrated {noun} and {second_noun}", "the {noun} and {second_noun} pub", "e-cyber{verb}", "{adjective}soft", "{domain} Inc.", "{thing} incorporated", "{noun}co", ]).format(noun=noun(random=random), plural=plural(random=random), country=country(random=random), city=city(random=random), adjective=adjective(random=random), lastname=lastname(random=random), other_lastname=lastname(random=random), domain=domain(random=random), second_noun=noun(random=random), verb=verb(random=random), thing=thing(random=random))
Produce a company name >>> mock_random.seed(0) >>> company(random=mock_random) 'faculty of applied chimp' >>> mock_random.seed(1) >>> company(random=mock_random) 'blistersecret studios' >>> mock_random.seed(2) >>> company(random=mock_random) 'pooppooppoop studios' >>> mock_random.seed(3) >>> company(random=mock_random) 'britchesshop' >>> mock_random.seed(4) >>> company(random=mock_random, capitalize=True) 'Mystery Studies Department' >>> mock_random.seed(5) >>> company(random=mock_random, slugify=True) 'the-law-offices-of-magnificentslap-boatbench-and-smellmouse'
def _create_sot_file(self):
    """Create Source of Truth file to compare."""
    # Bug on NX-OS 6.2.16 where overwriting the sot_file would take an
    # exceptionally long time (over 12 minutes); so just delete the sot_file.
    try:
        self._delete_file(filename="sot_file")
    except Exception:
        pass
    commands = [
        "terminal dont-ask",
        "checkpoint file sot_file",
        "no terminal dont-ask",
    ]
    self._send_command_list(commands)
Create Source of Truth file to compare.
def discover_modules(self): ''' Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>> ''' modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): # Check directory names for packages root_uri = self._path2uri(os.path.join(self.root_path, dirpath)) # Normally, we'd only iterate over dirnames, but since # dipy does not import a whole bunch of modules we'll # include those here as well (the *.py filenames). filenames = [f[:-3] for f in filenames if f.endswith('.py') and not f.startswith('__init__')] for filename in filenames: package_uri = '/'.join((dirpath, filename)) for subpkg_name in dirnames + filenames: package_uri = '.'.join((root_uri, subpkg_name)) package_path = self._uri2path(package_uri) if (package_path and self._survives_exclude(package_uri, 'package')): modules.append(package_uri) return sorted(modules)
Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>>
def add_page_if_missing(request): """ Returns ``feincms_page`` for request. """ try: page = Page.objects.for_request(request, best_match=True) return { 'leonardo_page': page, # DEPRECATED 'feincms_page': page, } except Page.DoesNotExist: return {}
Returns ``feincms_page`` for request.
def update(self, reseed): """ Update that trail! :param reseed: Whether we are in the normal reseed cycle or not. """ if self._clear: for i in range(0, 3): self._screen.print_at(" ", self._x, self._screen.start_line + self._y + i) self._maybe_reseed(reseed) else: for i in range(0, 3): self._screen.print_at(chr(randint(32, 126)), self._x, self._screen.start_line + self._y + i, Screen.COLOUR_GREEN) for i in range(4, 6): self._screen.print_at(chr(randint(32, 126)), self._x, self._screen.start_line + self._y + i, Screen.COLOUR_GREEN, Screen.A_BOLD) self._maybe_reseed(reseed)
Update that trail! :param reseed: Whether we are in the normal reseed cycle or not.
def evaluate_world_model( real_env, hparams, world_model_dir, debug_video_path, split=tf.estimator.ModeKeys.EVAL, ): """Evaluate the world model (reward accuracy).""" frame_stack_size = hparams.frame_stack_size rollout_subsequences = [] def initial_frame_chooser(batch_size): assert batch_size == len(rollout_subsequences) return np.stack([ [frame.observation.decode() for frame in subsequence[:frame_stack_size]] # pylint: disable=g-complex-comprehension for subsequence in rollout_subsequences ]) env_fn = rl.make_simulated_env_fn_from_hparams( real_env, hparams, batch_size=hparams.wm_eval_batch_size, initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir ) sim_env = env_fn(in_graph=False) subsequence_length = int( max(hparams.wm_eval_rollout_ratios) * hparams.simulated_rollout_length ) rollouts = real_env.current_epoch_rollouts( split=split, minimal_rollout_frames=(subsequence_length + frame_stack_size) ) video_writer = common_video.WholeVideoWriter( fps=10, output_path=debug_video_path, file_format="avi" ) reward_accuracies_by_length = { int(ratio * hparams.simulated_rollout_length): [] for ratio in hparams.wm_eval_rollout_ratios } for _ in range(hparams.wm_eval_num_batches): rollout_subsequences[:] = random_rollout_subsequences( rollouts, hparams.wm_eval_batch_size, subsequence_length + frame_stack_size ) eval_subsequences = [ subsequence[(frame_stack_size - 1):] for subsequence in rollout_subsequences ] # Check that the initial observation is the same in the real and simulated # rollout. sim_init_obs = sim_env.reset() def decode_real_obs(index): return np.stack([ subsequence[index].observation.decode() for subsequence in eval_subsequences # pylint: disable=cell-var-from-loop ]) real_init_obs = decode_real_obs(0) assert np.all(sim_init_obs == real_init_obs) debug_frame_batches = [] def append_debug_frame_batch(sim_obs, real_obs, sim_cum_rews, real_cum_rews, sim_rews, real_rews): """Add a debug frame.""" rews = [[sim_cum_rews, sim_rews], [real_cum_rews, real_rews]] headers = [] for j in range(len(sim_obs)): local_nps = [] for i in range(2): img = PIL_Image().new("RGB", (sim_obs.shape[-2], 11),) draw = PIL_ImageDraw().Draw(img) draw.text((0, 0), "c:{:3}, r:{:3}".format(int(rews[i][0][j]), int(rews[i][1][j])), fill=(255, 0, 0)) local_nps.append(np.asarray(img)) local_nps.append(np.zeros_like(local_nps[0])) headers.append(np.concatenate(local_nps, axis=1)) errs = absolute_hinge_difference(sim_obs, real_obs) headers = np.stack(headers) debug_frame_batches.append( # pylint: disable=cell-var-from-loop np.concatenate([headers, np.concatenate([sim_obs, real_obs, errs], axis=2)], axis=1) ) append_debug_frame_batch(sim_init_obs, real_init_obs, np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size), np.zeros(hparams.wm_eval_batch_size)) (sim_cum_rewards, real_cum_rewards) = ( np.zeros(hparams.wm_eval_batch_size) for _ in range(2) ) for i in range(subsequence_length): actions = [subsequence[i].action for subsequence in eval_subsequences] (sim_obs, sim_rewards, _) = sim_env.step(actions) sim_cum_rewards += sim_rewards real_rewards = np.array([ subsequence[i + 1].reward for subsequence in eval_subsequences ]) real_cum_rewards += real_rewards for (length, reward_accuracies) in six.iteritems( reward_accuracies_by_length ): if i + 1 == length: reward_accuracies.append( np.sum(sim_cum_rewards == real_cum_rewards) / len(real_cum_rewards) ) real_obs = decode_real_obs(i + 1) append_debug_frame_batch(sim_obs, real_obs, sim_cum_rewards, 
real_cum_rewards, sim_rewards, real_rewards) for debug_frames in np.stack(debug_frame_batches, axis=1): debug_frame = None for debug_frame in debug_frames: video_writer.write(debug_frame) if debug_frame is not None: # Append two black frames for aesthetics. for _ in range(2): video_writer.write(np.zeros_like(debug_frame)) video_writer.finish_to_disk() return { "reward_accuracy/at_{}".format(length): np.mean(reward_accuracies) for (length, reward_accuracies) in six.iteritems( reward_accuracies_by_length ) }
Evaluate the world model (reward accuracy).
def get_too_few_non_zero_degree_day_warning( model_type, balance_point, degree_day_type, degree_days, minimum_non_zero ): """ Return an empty list or a single warning wrapped in a list regarding non-zero degree days for a set of degree days. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). degree_days : :any:`pandas.Series` A series of degree day values. minimum_non_zero : :any:`int` Minimum allowable number of non-zero degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning. """ warnings = [] n_non_zero = int((degree_days > 0).sum()) if n_non_zero < minimum_non_zero: warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format( model_type=model_type, degree_day_type=degree_day_type ) ), description=( "Number of non-zero daily {degree_day_type} values below accepted minimum." " Candidate fit not attempted.".format( degree_day_type=degree_day_type.upper() ) ), data={ "n_non_zero_{degree_day_type}".format( degree_day_type=degree_day_type ): n_non_zero, "minimum_non_zero_{degree_day_type}".format( degree_day_type=degree_day_type ): minimum_non_zero, "{degree_day_type}_balance_point".format( degree_day_type=degree_day_type ): balance_point, }, ) ) return warnings
Return an empty list or a single warning wrapped in a list regarding non-zero degree days for a set of degree days. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). degree_days : :any:`pandas.Series` A series of degree day values. minimum_non_zero : :any:`int` Minimum allowable number of non-zero degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
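A hedged usage sketch with eemeter-style names assumed: a CDD series with only two non-zero days, checked against a minimum of ten, yields exactly one warning:

import pandas as pd

cdd = pd.Series([0.0, 0.0, 1.5, 0.0, 2.0, 0.0])
warnings = get_too_few_non_zero_degree_day_warning(
    model_type='cdd_hdd', balance_point=65.0, degree_day_type='cdd',
    degree_days=cdd, minimum_non_zero=10)
assert len(warnings) == 1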
def setWidth(self, vehID, width): """setWidth(string, double) -> None Sets the width in m for this vehicle. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_WIDTH, vehID, width)
setWidth(string, double) -> None Sets the width in m for this vehicle.
def get_preview_name(self): """Returns .SAFE name of full resolution L1C preview :return: name of preview file :rtype: str """ if self.safe_type == EsaSafeType.OLD_TYPE: name = _edit_name(self.tile_id, AwsConstants.PVI, delete_end=True) else: name = '_'.join([self.tile_id.split('_')[1], self.get_datatake_time(), AwsConstants.PVI]) return '{}.jp2'.format(name)
Returns .SAFE name of full resolution L1C preview :return: name of preview file :rtype: str
def set_listener_policy(name, port, policies=None, region=None, key=None,
                        keyid=None, profile=None):
    '''
    Set the policies of an ELB listener.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_elb.set_listener_policy myelb 443 "[policy1,policy2]"
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if not exists(name, region, key, keyid, profile):
        return True
    if policies is None:
        policies = []
    try:
        conn.set_lb_policies_of_listener(name, port, policies)
        log.info('Set policies %s on ELB %s listener %s', policies, name, port)
    except boto.exception.BotoServerError as e:
        log.info('Failed to set policy %s on ELB %s listener %s: %s',
                 policies, name, port, e.message,
                 exc_info_on_loglevel=logging.DEBUG)
        return False
    return True
Set the policies of an ELB listener. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt myminion boto_elb.set_listener_policy myelb 443 "[policy1,policy2]"
def create(self, vals, check=True):
    """
    Overrides the ORM create method.
    @param self: The object pointer
    @param vals: dictionary of fields value.
    @return: new record set for hotel folio.
    """
    if 'service_lines' not in vals and 'folio_id' in vals:
        tmp_room_lines = vals.get('room_lines', [])
        vals['order_policy'] = vals.get('hotel_policy', 'manual')
        vals.update({'room_lines': []})
        folio_id = super(HotelFolio, self).create(vals)
        for line in tmp_room_lines:
            line[2].update({'folio_id': folio_id})
        vals.update({'room_lines': tmp_room_lines})
        folio_id.write(vals)
    else:
        if not vals:
            vals = {}
        vals['name'] = self.env['ir.sequence'].next_by_code('hotel.folio')
        vals['duration'] = vals.get('duration', 0.0) or vals.get('duration_dummy', 0.0)
        folio_id = super(HotelFolio, self).create(vals)
        folio_room_line_obj = self.env['folio.room.line']
        h_room_obj = self.env['hotel.room']
        try:
            for rec in folio_id:
                if not rec.reservation_id:
                    for room_rec in rec.room_lines:
                        prod = room_rec.product_id.name
                        room_obj = h_room_obj.search([('name', '=', prod)])
                        room_obj.write({'isroom': False})
                        vals = {'room_id': room_obj.id,
                                'check_in': rec.checkin_date,
                                'check_out': rec.checkout_date,
                                'folio_id': rec.id,
                                }
                        folio_room_line_obj.create(vals)
        except Exception:
            for rec in folio_id:
                for room_rec in rec.room_lines:
                    prod = room_rec.product_id.name
                    room_obj = h_room_obj.search([('name', '=', prod)])
                    room_obj.write({'isroom': False})
                    vals = {'room_id': room_obj.id,
                            'check_in': rec.checkin_date,
                            'check_out': rec.checkout_date,
                            'folio_id': rec.id,
                            }
                    folio_room_line_obj.create(vals)
    return folio_id
Overrides the ORM create method. @param self: The object pointer @param vals: dictionary of fields value. @return: new record set for hotel folio.
def create_index(self): """ Override to provide code for creating the target index. By default it will be created without any special settings or mappings. """ es = self._init_connection() if not es.indices.exists(index=self.index): es.indices.create(index=self.index, body=self.settings)
Override to provide code for creating the target index. By default it will be created without any special settings or mappings.
def get_account_info(self): """ Returns a tuple for the number of containers and total bytes in the account. """ headers = self._manager.get_account_headers() return (headers.get("x-account-container-count"), headers.get("x-account-bytes-used"))
Returns a tuple for the number of containers and total bytes in the account.
def get_package_path(name): """Get the path to an installed package. name (unicode): Package name. RETURNS (Path): Path to installed package. """ name = name.lower() # use lowercase version to be safe # Here we're importing the module just to find it. This is worryingly # indirect, but it's otherwise very difficult to find the package. pkg = importlib.import_module(name) return Path(pkg.__file__).parent
Get the path to an installed package. name (unicode): Package name. RETURNS (Path): Path to installed package.
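A quick usage sketch: resolve where an installed package lives on disk (the printed path is illustrative):

print(get_package_path('numpy'))
# e.g. PosixPath('/usr/lib/python3/dist-packages/numpy')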
def write_incron_file_verbose(user, path): ''' Writes the contents of a file to a user's incrontab and return error message on error CLI Example: .. code-block:: bash salt '*' incron.write_incron_file_verbose root /tmp/new_incron ''' return __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False)
Writes the contents of a file to a user's incrontab and return error message on error CLI Example: .. code-block:: bash salt '*' incron.write_incron_file_verbose root /tmp/new_incron
def I(r, limbdark): ''' The standard quadratic limb darkening law. :param ndarray r: The radius vector :param limbdark: A :py:class:`pysyzygy.transit.LIMBDARK` instance containing the limb darkening law information :returns: The stellar intensity as a function of `r` ''' if limbdark.ldmodel == QUADRATIC: u1 = limbdark.u1 u2 = limbdark.u2 return (1-u1*(1-np.sqrt(1-r**2))-u2*(1-np.sqrt(1-r**2))**2)/(1-u1/3-u2/6)/np.pi elif limbdark.ldmodel == KIPPING: a = np.sqrt(limbdark.q1) b = 2*limbdark.q2 u1 = a*b u2 = a*(1 - b) return (1-u1*(1-np.sqrt(1-r**2))-u2*(1-np.sqrt(1-r**2))**2)/(1-u1/3-u2/6)/np.pi elif limbdark.ldmodel == NONLINEAR: raise Exception('Nonlinear model not yet implemented!') # TODO! else: raise Exception('Invalid limb darkening model.')
The standard quadratic limb darkening law. :param ndarray r: The radius vector :param limbdark: A :py:class:`pysyzygy.transit.LIMBDARK` instance containing the limb darkening law information :returns: The stellar intensity as a function of `r`
def _output_function_label(self): """ Determines if we want to output the function label in assembly. We output the function label only when the original instruction does not output the function label. :return: True if we should output the function label, False otherwise. :rtype: bool """ if self.asm_code: return True if not self.blocks: return True the_block = next((b for b in self.blocks if b.addr == self.addr), None) if the_block is None: return True if not the_block.instructions: return True if not the_block.instructions[0].labels: return True return False
Determines if we want to output the function label in assembly. We output the function label only when the original instruction does not output the function label. :return: True if we should output the function label, False otherwise. :rtype: bool
def create(self, acl=None):
    '''Creates a directory, optionally including an Acl argument to set permissions'''
    parent, name = getParentAndBase(self.path)
    json = {'name': name}
    if acl is not None:
        json['acl'] = acl.to_api_param()
    response = self.client.postJsonHelper(DataDirectory._getUrl(parent), json, False)
    if response.status_code != 200:
        raise DataApiError("Directory creation failed: " + str(response.content))
Creates a directory, optionally including an Acl argument to set permissions
def extract_files_from_dict(d): """Return any file objects from the provided dict. >>> extract_files_from_dict({ ... 'oauth_token': 'foo', ... 'track': { ... 'title': 'bar', ... 'asset_data': open('setup.py', 'rb') ... }}) # doctest:+ELLIPSIS {'track': {'asset_data': <...}} """ files = {} for key, value in six.iteritems(d): if isinstance(value, dict): files[key] = extract_files_from_dict(value) elif is_file_like(value): files[key] = value return files
Return any file objects from the provided dict. >>> extract_files_from_dict({ ... 'oauth_token': 'foo', ... 'track': { ... 'title': 'bar', ... 'asset_data': open('setup.py', 'rb') ... }}) # doctest:+ELLIPSIS {'track': {'asset_data': <...}}
def from_raw_script(cls, raw_script): """Creates instance of `Command` from a list of script parts. :type raw_script: [basestring] :rtype: Command :raises: EmptyCommand """ script = format_raw_script(raw_script) if not script: raise EmptyCommand expanded = shell.from_shell(script) output = get_output(script, expanded) return cls(expanded, output)
Creates instance of `Command` from a list of script parts. :type raw_script: [basestring] :rtype: Command :raises: EmptyCommand
def are_equal(self, sp1, sp2): """ True if species are exactly the same, i.e., Fe2+ == Fe2+ but not Fe3+. and the spins are reversed. i.e., spin up maps to spin down, and vice versa. Args: sp1: First species. A dict of {specie/element: amt} as per the definition in Site and PeriodicSite. sp2: Second species. A dict of {specie/element: amt} as per the definition in Site and PeriodicSite. Returns: Boolean indicating whether species are equal. """ for s1 in sp1.keys(): spin1 = getattr(s1, "spin", 0) oxi1 = getattr(s1, "oxi_state", 0) for s2 in sp2.keys(): spin2 = getattr(s2, "spin", 0) oxi2 = getattr(s2, "oxi_state", 0) if (s1.symbol == s2.symbol and oxi1 == oxi2 and spin2 == -spin1): break else: return False return True
True if species are exactly the same, i.e., Fe2+ == Fe2+ but not Fe3+. and the spins are reversed. i.e., spin up maps to spin down, and vice versa. Args: sp1: First species. A dict of {specie/element: amt} as per the definition in Site and PeriodicSite. sp2: Second species. A dict of {specie/element: amt} as per the definition in Site and PeriodicSite. Returns: Boolean indicating whether species are equal.
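A self-contained illustration of the rule above, using a namedtuple as a stand-in for pymatgen species objects (only the three attributes the method reads); `matcher` stands for an instance of the comparator class:

from collections import namedtuple

Sp = namedtuple('Sp', ['symbol', 'oxi_state', 'spin'])
fe_up = Sp('Fe', 2, 1)
fe_down = Sp('Fe', 2, -1)
fe3_down = Sp('Fe', 3, -1)

matcher.are_equal({fe_up: 1}, {fe_down: 1})   # True: same species, spins reversed
matcher.are_equal({fe_up: 1}, {fe3_down: 1})  # False: oxidation states differ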
def output(self, _filename): """ _filename is not used Args: _filename(string) """ txt = '' for c in self.contracts: txt += "\nContract %s\n"%c.name table = PrettyTable(['Variable', 'Dependencies']) for v in c.state_variables: table.add_row([v.name, _get(v, c)]) txt += str(table) txt += "\n" for f in c.functions_and_modifiers_not_inherited: txt += "\nFunction %s\n"%f.full_name table = PrettyTable(['Variable', 'Dependencies']) for v in f.variables: table.add_row([v.name, _get(v, f)]) for v in c.state_variables: table.add_row([v.canonical_name, _get(v, f)]) txt += str(table) self.info(txt)
_filename is not used Args: _filename(string)
def radviz(X, y=None, ax=None, features=None, classes=None, color=None, colormap=None, alpha=1.0, **kwargs): """ Displays each feature as an axis around a circle surrounding a scatter plot whose points are each individual instance. This helper function is a quick wrapper to utilize the RadialVisualizer (Transformer) for one-off analysis. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values ax : matplotlib Axes, default: None The axes to plot the figure on. features : list of strings, default: None The names of the features or columns classes : list of strings, default: None The names of the classes in the target color : list or tuple of colors, default: None Specify the colors for each individual class colormap : string or matplotlib cmap, default: None Sequential colormap for continuous target alpha : float, default: 1.0 Specify a transparency where 1 is completely opaque and 0 is completely transparent. This property makes densely clustered points more visible. Returns ------- ax : matplotlib axes Returns the axes that the parallel coordinates were drawn on. """ # Instantiate the visualizer visualizer = RadialVisualizer( ax, features, classes, color, colormap, alpha, **kwargs ) # Fit and transform the visualizer (calls draw) visualizer.fit(X, y, **kwargs) visualizer.transform(X) # Return the axes object on the visualizer return visualizer.ax
Displays each feature as an axis around a circle surrounding a scatter plot whose points are each individual instance. This helper function is a quick wrapper to utilize the RadialVisualizer (Transformer) for one-off analysis. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values ax : matplotlib Axes, default: None The axes to plot the figure on. features : list of strings, default: None The names of the features or columns classes : list of strings, default: None The names of the classes in the target color : list or tuple of colors, default: None Specify the colors for each individual class colormap : string or matplotlib cmap, default: None Sequential colormap for continuous target alpha : float, default: 1.0 Specify a transparency where 1 is completely opaque and 0 is completely transparent. This property makes densely clustered points more visible. Returns ------- ax : matplotlib axes Returns the axes that the parallel coordinates were drawn on.
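A hedged usage sketch with scikit-learn's iris data; yellowbrick appears to expose this helper as a quick method, and the import path is assumed:

from sklearn.datasets import load_iris
from yellowbrick.features import radviz

data = load_iris()
ax = radviz(data.data, data.target,
            features=data.feature_names,
            classes=list(data.target_names))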
def _load_assembly_mapping_data(filename): """ Load assembly mapping data. Parameters ---------- filename : str path to compressed archive with assembly mapping data Returns ------- assembly_mapping_data : dict dict of assembly maps if loading was successful, else None Notes ----- Keys of returned dict are chromosomes and values are the corresponding assembly map. """ try: assembly_mapping_data = {} with tarfile.open(filename, "r") as tar: # http://stackoverflow.com/a/2018576 for member in tar.getmembers(): if ".json" in member.name: with tar.extractfile(member) as tar_file: tar_bytes = tar_file.read() # https://stackoverflow.com/a/42683509/4727627 assembly_mapping_data[member.name.split(".")[0]] = json.loads( tar_bytes.decode("utf-8") ) return assembly_mapping_data except Exception as err: print(err) return None
Load assembly mapping data. Parameters ---------- filename : str path to compressed archive with assembly mapping data Returns ------- assembly_mapping_data : dict dict of assembly maps if loading was successful, else None Notes ----- Keys of returned dict are chromosomes and values are the corresponding assembly map.
def _validate_data(data): """Validates the given data and raises an error if any non-allowed keys are provided or any required keys are missing. :param data: Data to send to API :type data: dict """ data_keys = set(data.keys()) extra_keys = data_keys - set(ALLOWED_KEYS) missing_keys = set(REQUIRED_KEYS) - data_keys if extra_keys: raise ValueError( 'Invalid data keys {!r}'.format(', '.join(extra_keys)) ) if missing_keys: raise ValueError( 'Missing keys {!r}'.format(', '.join(missing_keys)) )
Validates the given data and raises an error if any non-allowed keys are provided or any required keys are missing. :param data: Data to send to API :type data: dict
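A worked illustration of the same set-difference pattern with stand-in key sets (the module defines its own ALLOWED_KEYS and REQUIRED_KEYS):

allowed = {'to', 'subject', 'body'}
required = {'to', 'body'}

data = {'to': 'a@example.com', 'bodyy': 'typo'}
extra = set(data) - allowed     # {'bodyy'}: not allowed, would raise
missing = required - set(data)  # {'body'}: required but absent, would raise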
def _analyze_variable_attributes(self, attributes): """ Analyze event variable attributes :param attributes: The event variable attributes to parse. :return: None """ # Check for the indexed attribute if 'indexed' in attributes: self._indexed = attributes['indexed'] super(EventVariableSolc, self)._analyze_variable_attributes(attributes)
Analyze event variable attributes :param attributes: The event variable attributes to parse. :return: None
def _EvaluateElementsDataSize(self, context): """Evaluates elements data size. Args: context (DataTypeMapContext): data type map context. Returns: int: elements data size. Raises: MappingError: if the elements data size cannot be determined. """ elements_data_size = None if self._data_type_definition.elements_data_size: elements_data_size = self._data_type_definition.elements_data_size elif self._data_type_definition.elements_data_size_expression: expression = self._data_type_definition.elements_data_size_expression namespace = {} if context and context.values: namespace.update(context.values) # Make sure __builtins__ contains an empty dictionary. namespace['__builtins__'] = {} try: elements_data_size = eval(expression, namespace) # pylint: disable=eval-used except Exception as exception: raise errors.MappingError( 'Unable to determine elements data size with error: {0!s}'.format( exception)) if elements_data_size is None or elements_data_size < 0: raise errors.MappingError( 'Invalid elements data size: {0!s}'.format(elements_data_size)) return elements_data_size
Evaluates elements data size. Args: context (DataTypeMapContext): data type map context. Returns: int: elements data size. Raises: MappingError: if the elements data size cannot be determined.
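A minimal illustration of the restricted-eval pattern used above: the expression sees only the context values, not Python builtins (the method itself wraps any failure in a MappingError):

namespace = {'number_of_elements': 4, 'element_size': 8,
             '__builtins__': {}}
print(eval('number_of_elements * element_size', namespace))  # 32
eval('open("/etc/passwd")', namespace)  # NameError: name 'open' is not defined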
def p0f_impersonate(pkt, osgenre=None, osdetails=None, signature=None, extrahops=0, mtu=1500, uptime=None): """Modifies pkt so that p0f will think it has been sent by a specific OS. If osdetails is None, then we randomly pick up a personality matching osgenre. If osgenre and signature are also None, we use a local signature (using p0f_getlocalsigs). If signature is specified (as a tuple), we use the signature. For now, only TCP Syn packets are supported. Some specifications of the p0f.fp file are not (yet) implemented.""" pkt = pkt.copy() # pkt = pkt.__class__(raw(pkt)) while pkt.haslayer(IP) and pkt.haslayer(TCP): pkt = pkt.getlayer(IP) if isinstance(pkt.payload, TCP): break pkt = pkt.payload if not isinstance(pkt, IP) or not isinstance(pkt.payload, TCP): raise TypeError("Not a TCP/IP packet") db = p0f_selectdb(pkt.payload.flags) if osgenre: pb = db.get_base() if pb is None: pb = [] pb = [x for x in pb if x[6] == osgenre] if osdetails: pb = [x for x in pb if x[7] == osdetails] elif signature: pb = [signature] else: pb = p0f_getlocalsigs()[db] if db == p0fr_kdb: # 'K' quirk <=> RST+ACK if pkt.payload.flags & 0x4 == 0x4: pb = [x for x in pb if 'K' in x[5]] else: pb = [x for x in pb if 'K' not in x[5]] if not pb: raise Scapy_Exception("No match in the p0f database") pers = pb[random.randint(0, len(pb) - 1)] # options (we start with options because of MSS) # Take the options already set as "hints" to use in the new packet if we # can. MSS, WScale and Timestamp can all be wildcarded in a signature, so # we'll use the already-set values if they're valid integers. orig_opts = dict(pkt.payload.options) int_only = lambda val: val if isinstance(val, six.integer_types) else None mss_hint = int_only(orig_opts.get('MSS')) wscale_hint = int_only(orig_opts.get('WScale')) ts_hint = [int_only(o) for o in orig_opts.get('Timestamp', (None, None))] options = [] if pers[4] != '.': for opt in pers[4].split(','): if opt[0] == 'M': # MSS might have a maximum size because of window size # specification if pers[0][0] == 'S': maxmss = (2**16 - 1) // int(pers[0][1:]) else: maxmss = (2**16 - 1) # disregard hint if out of range if mss_hint and not 0 <= mss_hint <= maxmss: mss_hint = None # If we have to randomly pick up a value, we cannot use # scapy RandXXX() functions, because the value has to be # set in case we need it for the window size value. That's # why we use random.randint() if opt[1:] == '*': if mss_hint is not None: options.append(('MSS', mss_hint)) else: options.append(('MSS', random.randint(1, maxmss))) elif opt[1] == '%': coef = int(opt[2:]) if mss_hint is not None and mss_hint % coef == 0: options.append(('MSS', mss_hint)) else: options.append(( 'MSS', coef * random.randint(1, maxmss // coef))) else: options.append(('MSS', int(opt[1:]))) elif opt[0] == 'W': if wscale_hint and not 0 <= wscale_hint < 2**8: wscale_hint = None if opt[1:] == '*': if wscale_hint is not None: options.append(('WScale', wscale_hint)) else: options.append(('WScale', RandByte())) elif opt[1] == '%': coef = int(opt[2:]) if wscale_hint is not None and wscale_hint % coef == 0: options.append(('WScale', wscale_hint)) else: options.append(( 'WScale', coef * RandNum(min=1, max=(2**8 - 1) // coef))) # noqa: E501 else: options.append(('WScale', int(opt[1:]))) elif opt == 'T0': options.append(('Timestamp', (0, 0))) elif opt == 'T': # Determine first timestamp. 
if uptime is not None: ts_a = uptime elif ts_hint[0] and 0 < ts_hint[0] < 2**32: # Note: if first ts is 0, p0f registers it as "T0" not "T", # hence we don't want to use the hint if it was 0. ts_a = ts_hint[0] else: ts_a = random.randint(120, 100 * 60 * 60 * 24 * 365) # Determine second timestamp. if 'T' not in pers[5]: ts_b = 0 elif ts_hint[1] and 0 < ts_hint[1] < 2**32: ts_b = ts_hint[1] else: # FIXME: RandInt() here does not work (bug (?) in # TCPOptionsField.m2i often raises "OverflowError: # long int too large to convert to int" in: # oval = struct.pack(ofmt, *oval)" # Actually, this is enough to often raise the error: # struct.pack('I', RandInt()) ts_b = random.randint(1, 2**32 - 1) options.append(('Timestamp', (ts_a, ts_b))) elif opt == 'S': options.append(('SAckOK', '')) elif opt == 'N': options.append(('NOP', None)) elif opt == 'E': options.append(('EOL', None)) elif opt[0] == '?': if int(opt[1:]) in TCPOptions[0]: optname = TCPOptions[0][int(opt[1:])][0] optstruct = TCPOptions[0][int(opt[1:])][1] options.append((optname, struct.unpack(optstruct, RandString(struct.calcsize(optstruct))._fix()))) # noqa: E501 else: options.append((int(opt[1:]), '')) # FIXME: qqP not handled else: warning("unhandled TCP option " + opt) pkt.payload.options = options # window size if pers[0] == '*': pkt.payload.window = RandShort() elif pers[0].isdigit(): pkt.payload.window = int(pers[0]) elif pers[0][0] == '%': coef = int(pers[0][1:]) pkt.payload.window = coef * RandNum(min=1, max=(2**16 - 1) // coef) elif pers[0][0] == 'T': pkt.payload.window = mtu * int(pers[0][1:]) elif pers[0][0] == 'S': # needs MSS set mss = [x for x in options if x[0] == 'MSS'] if not mss: raise Scapy_Exception("TCP window value requires MSS, and MSS option not set") # noqa: E501 pkt.payload.window = mss[0][1] * int(pers[0][1:]) else: raise Scapy_Exception('Unhandled window size specification') # ttl pkt.ttl = pers[1] - extrahops # DF flag pkt.flags |= (2 * pers[2]) # FIXME: ss (packet size) not handled (how ? may be with D quirk # if present) # Quirks if pers[5] != '.': for qq in pers[5]: # FIXME: not handled: P, I, X, ! # T handled with the Timestamp option if qq == 'Z': pkt.id = 0 elif qq == 'U': pkt.payload.urgptr = RandShort() elif qq == 'A': pkt.payload.ack = RandInt() elif qq == 'F': if db == p0fo_kdb: pkt.payload.flags |= 0x20 # U else: pkt.payload.flags |= random.choice([8, 32, 40]) # P/U/PU elif qq == 'D' and db != p0fo_kdb: pkt /= conf.raw_layer(load=RandString(random.randint(1, 10))) # XXX p0fo.fp # noqa: E501 elif qq == 'Q': pkt.payload.seq = pkt.payload.ack # elif qq == '0': pkt.payload.seq = 0 # if db == p0fr_kdb: # '0' quirk is actually not only for p0fr.fp (see # packet2p0f()) if '0' in pers[5]: pkt.payload.seq = 0 elif pkt.payload.seq == 0: pkt.payload.seq = RandInt() while pkt.underlayer: pkt = pkt.underlayer return pkt
Modifies pkt so that p0f will think it has been sent by a specific OS. If osdetails is None, then we randomly pick up a personality matching osgenre. If osgenre and signature are also None, we use a local signature (using p0f_getlocalsigs). If signature is specified (as a tuple), we use the signature. For now, only TCP Syn packets are supported. Some specifications of the p0f.fp file are not (yet) implemented.
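A hedged usage sketch; the osgenre string must match an entry in the locally loaded p0f database, and the address is illustrative:

from scapy.all import IP, TCP

syn = IP(dst='192.0.2.1') / TCP(dport=80, flags='S')
faked = p0f_impersonate(syn, osgenre='Linux')
faked.show()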
def GetLocations():
    """Return all cloud locations available to the calling alias."""
    r = clc.v1.API.Call('post', 'Account/GetLocations', {})
    if r['Success'] is not True:
        if clc.args:
            clc.v1.output.Status('ERROR', 3, 'Error calling %s. Status code %s. %s' %
                                 ('Account/GetLocations', r['StatusCode'], r['Message']))
        raise Exception('Error calling %s. Status code %s. %s' %
                        ('Account/GetLocations', r['StatusCode'], r['Message']))
    elif int(r['StatusCode']) == 0:
        clc.LOCATIONS = [x['Alias'] for x in r['Locations']]
        return r['Locations']
Return all cloud locations available to the calling alias.