def _bfd_rx(self, **kwargs):
    """Return the BFD minimum receive interval XML.

    You should not use this method. You probably want `BGP.bfd`.

    Args:
        min_rx (str): BFD receive interval in milliseconds (300, 500, etc.)
        delete (bool): Remove the configuration if ``True``.

    Returns:
        XML to be passed to the switch.

    Raises:
        None
    """
    int_type = kwargs['int_type']
    method_name = 'interface_%s_bfd_interval_min_rx' % int_type
    bfd_rx = getattr(self._interface, method_name)
    config = bfd_rx(**kwargs)
    if kwargs['delete']:
        tag = 'min-rx'
        config.find('.//*%s' % tag).set('operation', 'delete')
    return config
Return the BFD minimum receive interval XML. You should not use this method. You probably want `BGP.bfd`. Args: min_rx (str): BFD receive interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None
def is_complex(self) -> bool:
    """
    Whether the field is "complex", e.g. whether env variables should be parsed as JSON.
    """
    from .main import BaseModel  # noqa: F811

    return (
        self.shape != Shape.SINGLETON
        or lenient_issubclass(self.type_, (BaseModel, list, set, dict))
        or hasattr(self.type_, '__pydantic_model__')  # pydantic dataclass
    )
Whether the field is "complex", e.g. whether env variables should be parsed as JSON.
def _sitelist(self, matrix): """ Returns a list of sites from a SiteMatrix, optionally filtered by 'domain' param """ _list = [] for item in matrix: sites = [] if isinstance(matrix[item], list): sites = matrix[item] elif isinstance(matrix[item], dict): sites = matrix[item]['site'] for site in sites: if len(site.keys()) > 4: # closed, fishbowl, private continue domain = self.params.get('domain') if domain: if domain in site['url']: _list.append(site['url']) else: _list.append(site['url']) return _list
Returns a list of sites from a SiteMatrix, optionally filtered by 'domain' param
def write_meta(self): """Writes all meta data, ucd,description and units The default implementation is to write this to a file called meta.yaml in the directory defined by :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself. (For instance the vaex hdf5 implementation does this) This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_meta` is called, so that the information is not lost between sessions. Note: opening a DataFrame twice may result in corruption of this file. """ # raise NotImplementedError path = os.path.join(self.get_private_dir(create=True), "meta.yaml") units = {key: str(value) for key, value in self.units.items()} meta_info = dict(description=self.description, ucds=self.ucds, units=units, descriptions=self.descriptions, ) vaex.utils.write_json_or_yaml(path, meta_info)
Writes all meta data: ucd, description and units. The default implementation writes this to a file called meta.yaml in the directory defined by :func:`DataFrame.get_private_dir`. Other implementations may store this in the DataFrame file itself (for instance, the vaex hdf5 implementation does this). This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_meta` is called, so that the information is not lost between sessions. Note: opening a DataFrame twice may result in corruption of this file.
def _sync_outlineexplorer_file_order(self): """ Order the root file items of the outline explorer as in the tabbar of the current EditorStack. """ if self.outlineexplorer is not None: self.outlineexplorer.treewidget.set_editor_ids_order( [finfo.editor.get_document_id() for finfo in self.data])
Order the root file items of the outline explorer as in the tabbar of the current EditorStack.
def _check_axis(self, ds, name):
    '''
    Checks that the axis attribute is a string and an allowed value, namely
    one of 'T', 'X', 'Y', or 'Z'.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str name: Name of the variable
    :rtype: compliance_checker.base.Result
    '''
    allowed_axis = ['T', 'X', 'Y', 'Z']
    variable = ds.variables[name]
    axis = variable.axis

    valid_axis = TestCtx(BaseCheck.HIGH, self.section_titles['4'])
    # Trailing comma removed: the original accidentally bound a 1-tuple here.
    axis_is_string = isinstance(axis, basestring)
    valid_axis.assert_true(axis_is_string and len(axis) > 0,
                           "{}'s axis attribute must be a non-empty string".format(name))

    # If axis isn't a string we can't continue any checks
    if not axis_is_string or len(axis) == 0:
        return valid_axis.to_result()

    valid_axis.assert_true(axis in allowed_axis,
                           "{}'s axis attribute must be T, X, Y, or Z, ".format(name) +
                           "currently {}".format(axis))

    return valid_axis.to_result()
Checks that the axis attribute is a string and an allowed value, namely one of 'T', 'X', 'Y', or 'Z'. :param netCDF4.Dataset ds: An open netCDF dataset :param str name: Name of the variable :rtype: compliance_checker.base.Result
def grad(self, params, epsilon=0.0001):
    """Used to check gradient estimation through slope approximation."""
    grad = []
    for x in range(len(params)):
        temp = np.copy(params)
        temp[x] += epsilon
        temp2 = np.copy(params)
        temp2[x] -= epsilon
        grad.append((self.__cost_function(temp) - self.__cost_function(temp2)) / (2 * epsilon))
    return np.array(grad)
Used to check gradient estimation through slope approximation.
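A minimal standalone illustration of the central-difference check above; the quadratic cost function is invented for the example and stands in for the class's private cost function.

import numpy as np

def numeric_grad(cost, params, epsilon=1e-4):
    # Central difference: (f(x + eps) - f(x - eps)) / (2 * eps) per component.
    grad = np.zeros_like(params, dtype=float)
    for i in range(len(params)):
        up, down = params.copy(), params.copy()
        up[i] += epsilon
        down[i] -= epsilon
        grad[i] = (cost(up) - cost(down)) / (2 * epsilon)
    return grad

cost = lambda p: np.sum(p ** 2)                    # analytic gradient is 2 * p
print(numeric_grad(cost, np.array([1.0, -3.0])))   # approximately [ 2. -6.]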
def _parse_source_sections(self, diff_str): """ Given the output of `git diff`, return a dictionary with keys that are source file paths. Each value is a list of lines from the `git diff` output related to the source file. Raises a `GitDiffError` if `diff_str` is in an invalid format. """ # Create a dict to map source files to lines in the diff output source_dict = dict() # Keep track of the current source file src_path = None # Signal that we've found a hunk (after starting a source file) found_hunk = False # Parse the diff string into sections by source file for line in diff_str.split('\n'): # If the line starts with "diff --git" # or "diff --cc" (in the case of a merge conflict) # then it is the start of a new source file if line.startswith('diff --git') or line.startswith('diff --cc'): # Retrieve the name of the source file src_path = self._parse_source_line(line) # Create an entry for the source file, if we don't # already have one. if src_path not in source_dict: source_dict[src_path] = [] # Signal that we're waiting for a hunk for this source file found_hunk = False # Every other line is stored in the dictionary for this source file # once we find a hunk section else: # Only add lines if we're in a hunk section # (ignore index and files changed lines) if found_hunk or line.startswith('@@'): # Remember that we found a hunk found_hunk = True if src_path is not None: source_dict[src_path].append(line) else: # We tolerate other information before we have # a source file defined, unless it's a hunk line if line.startswith("@@"): msg = "Hunk has no source file: '{}'".format(line) raise GitDiffError(msg) return source_dict
Given the output of `git diff`, return a dictionary with keys that are source file paths. Each value is a list of lines from the `git diff` output related to the source file. Raises a `GitDiffError` if `diff_str` is in an invalid format.
def GetScriptHashesForVerifying(self): """ Get a list of script hashes for verifying transactions. Raises: Exception: if there are no valid assets in the transaction. Returns: list: of UInt160 type script hashes. """ if not self.References and len(self.Attributes) < 1: return [] hashes = set() for coinref, output in self.References.items(): hashes.add(output.ScriptHash) for attr in self.Attributes: if attr.Usage == TransactionAttributeUsage.Script: if type(attr.Data) is UInt160: hashes.add(attr.Data) else: hashes.add(UInt160(data=attr.Data)) for key, group in groupby(self.outputs, lambda p: p.AssetId): if self.raw_tx: asset = Helper.StaticAssetState(key) else: asset = GetBlockchain().GetAssetState(key.ToBytes()) if asset is None: raise Exception("Invalid operation") if asset.AssetType == AssetType.DutyFlag: for p in group: hashes.add(p.ScriptHash) hashlist = list(hashes) hashlist.sort() return hashlist
Get a list of script hashes for verifying transactions. Raises: Exception: if there are no valid assets in the transaction. Returns: list: of UInt160 type script hashes.
def copy(self): """Make a copy of this instance. Copies the local data stored as simple types and copies the client attached to this instance. :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` :returns: A copy of the current instance. """ new_client = self._client.copy() return self.__class__( self.instance_id, new_client, self.configuration_name, node_count=self.node_count, display_name=self.display_name, )
Make a copy of this instance. Copies the local data stored as simple types and copies the client attached to this instance. :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` :returns: A copy of the current instance.
def update_entity(self, entity, if_match='*'): ''' Adds an update entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.update_entity` for more information on updates. The operation will not be executed until the batch is committed. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*). ''' request = _update_entity(entity, if_match) self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
Adds an update entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.update_entity` for more information on updates. The operation will not be executed until the batch is committed. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*).
def fasta(self): ''' str: Returns the sequence, as a FASTA-formatted string Note: The FASTA string is built using ``Sequence.id`` and ``Sequence.sequence``. ''' if not self._fasta: self._fasta = '>{}\n{}'.format(self.id, self.sequence) return self._fasta
str: Returns the sequence, as a FASTA-formatted string Note: The FASTA string is built using ``Sequence.id`` and ``Sequence.sequence``.
def encode(strs):
    """Encodes a list of strings to a single string.
    :type strs: List[str]
    :rtype: str
    """
    res = ''
    for string in strs:  # iterate over the list itself; .split() would fail on a list
        res += str(len(string)) + ":" + string
    return res
Encodes a list of strings to a single string. :type strs: List[str] :rtype: str
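A hedged sketch of a matching decoder for the length-prefixed format produced by encode() above; this decode() helper is not part of the original source.

def decode(s):
    # Split "<len>:<payload>" frames back into the original list of strings.
    res, i = [], 0
    while i < len(s):
        colon = s.index(':', i)                  # separator between length and payload
        length = int(s[i:colon])
        res.append(s[colon + 1:colon + 1 + length])
        i = colon + 1 + length                   # jump past this payload
    return res

# Round trip: decode(encode(["ab", "c"])) == ["ab", "c"]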
def notes(path):
    """This function extracts any experimental notes from a ProCoDA data file.

    :param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
    :type path: string

    :return: The rows of the data file that contain text notes inserted during the experiment. Use this to identify the section of the data file that you want to extract.
    :rtype: pandas.Dataframe
    """
    df = pd.read_csv(path, delimiter='\t')
    # Match any letter; the original second positional argument was silently
    # being interpreted as the `case` flag of Series.str.contains.
    text_row = df.iloc[0:-1, 0].str.contains('[a-zA-Z]')
    text_row_index = text_row.index[text_row].tolist()
    notes = df.loc[text_row_index]
    return notes
This function extracts any experimental notes from a ProCoDA data file. :param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient. :type path: string :return: The rows of the data file that contain text notes inserted during the experiment. Use this to identify the section of the data file that you want to extract. :rtype: pandas.Dataframe
def find_commands(cls): """ Finds commands by finding the subclasses of Command""" cmds = [] for subclass in cls.__subclasses__(): cmds.append(subclass) cmds.extend(find_commands(subclass)) return cmds
Finds commands by finding the subclasses of Command
def get_bestfit_line(self, x_min=None, x_max=None, resolution=None): """ Method to get bestfit line using the defined self.bestfit_func method args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max returns: [bestfit_x, bestfit_y] """ x = self.args["x"] if x_min is None: x_min = min(x) if x_max is None: x_max = max(x) if resolution is None: resolution = self.args.get("resolution", 1000) bestfit_x = np.linspace(x_min, x_max, resolution) return [bestfit_x, self.bestfit_func(bestfit_x)]
Method to get bestfit line using the defined self.bestfit_func method args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max returns: [bestfit_x, bestfit_y]
def time(self): """ Returns the current time for this edit. :return <QtCore.QTime> """ if self.isMilitaryTime(): format = 'hh:mm:ss' time_of_day = '' else: format = 'hh:mm:ssap' time_of_day = self._timeOfDayCombo.currentText().lower() try: hour = int(self._hourCombo.currentText()) if self.showHours() else 1 except ValueError: hour = 1 try: minute = int(self._minuteCombo.currentText()) if self.showMinutes() else 0 except ValueError: minute = 0 try: second = int(self._secondCombo.currentText()) if self.showSeconds() else 0 except ValueError: second = 0 combined = '{0:02}:{1:02}:{2:02}{3}'.format(hour, minute, second, time_of_day) return QtCore.QTime.fromString(combined, format)
Returns the current time for this edit. :return <QtCore.QTime>
def get_objective_banks(self): """Pass through to provider ObjectiveBankLookupSession.get_objective_banks""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_template catalogs = self._get_provider_session('objective_bank_lookup_session').get_objective_banks() cat_list = [] for cat in catalogs: cat_list.append(ObjectiveBank(self._provider_manager, cat, self._runtime, self._proxy)) return ObjectiveBankList(cat_list)
Pass through to provider ObjectiveBankLookupSession.get_objective_banks
def sample_distinct(self, n_to_sample, **kwargs): """Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations. """ # Record how many distinct items have not yet been sampled n_notsampled = np.sum(np.isnan(self.cached_labels_)) if n_notsampled == 0: raise Exception("All distinct items have already been sampled.") if n_to_sample > n_notsampled: warnings.warn("Only {} distinct item(s) have not yet been sampled." " Setting n_to_sample = {}.".format(n_notsampled, \ n_notsampled)) n_to_sample = n_notsampled n_sampled = 0 # number of distinct items sampled this round while n_sampled < n_to_sample: self.sample(1,**kwargs) n_sampled += self._queried_oracle[self.t_ - 1]*1
Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations.
def regex(pattern, prompt=None, empty=False, flags=0): """Prompt a string that matches a regular expression. Parameters ---------- pattern : str A regular expression that must be matched. prompt : str, optional Use an alternative prompt. empty : bool, optional Allow an empty response. flags : int, optional Flags that will be passed to ``re.match``. Returns ------- Match or None A match object if the user entered a matching string. None if the user pressed only Enter and ``empty`` was True. See Also -------- re.match """ s = _prompt_input(prompt) if empty and not s: return None else: m = re.match(pattern, s, flags=flags) if m: return m else: return regex(pattern, prompt=prompt, empty=empty, flags=flags)
Prompt a string that matches a regular expression. Parameters ---------- pattern : str A regular expression that must be matched. prompt : str, optional Use an alternative prompt. empty : bool, optional Allow an empty response. flags : int, optional Flags that will be passed to ``re.match``. Returns ------- Match or None A match object if the user entered a matching string. None if the user pressed only Enter and ``empty`` was True. See Also -------- re.match
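A hedged usage sketch of regex() above; the pattern and prompt text are made up for illustration, and the call assumes an interactive session.

m = regex(r'\d{4}-\d{2}-\d{2}', prompt='Date (YYYY-MM-DD): ', empty=True)
if m is None:
    print('skipped')                      # user pressed Enter with empty=True
else:
    print('you entered', m.group(0))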
def retrieve_by_id(self, id_): """Return a JSSObject for the element with ID id_""" items_with_id = [item for item in self if item.id == int(id_)] if len(items_with_id) == 1: return items_with_id[0].retrieve()
Return a JSSObject for the element with ID id_
def start_worker(self): """Trigger new process as a RQ worker.""" if not self.include_rq: return None worker = Worker(queues=self.queues, connection=self.connection) worker_pid_path = current_app.config.get( "{}_WORKER_PID".format(self.config_prefix), 'rl_worker.pid' ) try: worker_pid_file = open(worker_pid_path, 'r') worker_pid = int(worker_pid_file.read()) print("Worker already started with PID=%d" % worker_pid) worker_pid_file.close() return worker_pid except (IOError, TypeError): self.worker_process = Process(target=worker_wrapper, kwargs={ 'worker_instance': worker, 'pid_path': worker_pid_path }) self.worker_process.start() worker_pid_file = open(worker_pid_path, 'w') worker_pid_file.write("%d" % self.worker_process.pid) worker_pid_file.close() print("Start a worker process with PID=%d" % self.worker_process.pid) return self.worker_process.pid
Trigger new process as a RQ worker.
def server(self): """ Returns :class:`plexapi.myplex.MyPlexResource` with server of current item. """ server = [s for s in self._server.resources() if s.clientIdentifier == self.machineIdentifier] if len(server) == 0: raise NotFound('Unable to find server with uuid %s' % self.machineIdentifier) return server[0]
Returns :class:`plexapi.myplex.MyPlexResource` with server of current item.
def token(self):
    """
    Token given by Transbank for the payment initialization URL.
    Will raise PaymentError if an error occurred.
    """
    if not self._token:
        self._token = self.fetch_token()
        logger.payment(self)
    return self._token
Token given by Transbank for the payment initialization URL. Will raise PaymentError if an error occurred.
def save(self, model_filename, optimizer_filename): """ Save the state of the model & optimizer to disk """ serializers.save_hdf5(model_filename, self.model) serializers.save_hdf5(optimizer_filename, self.optimizer)
Save the state of the model & optimizer to disk
def database_admin_api(self): """Helper for session-related API calls.""" if self._database_admin_api is None: self._database_admin_api = DatabaseAdminClient( credentials=self.credentials, client_info=_CLIENT_INFO ) return self._database_admin_api
Helper for session-related API calls.
def retrieve_equities(self, sids): """ Retrieve Equity objects for a list of sids. Users generally shouldn't need to this method (instead, they should prefer the more general/friendly `retrieve_assets`), but it has a documented interface and tests because it's used upstream. Parameters ---------- sids : iterable[string] Returns ------- equities : dict[str -> Equity] Raises ------ EquitiesNotFound When any requested asset isn't found. """ cache = self._asset_cache try: return { k: cache[k] for k in sids } except KeyError: raise EquitiesNotFound(sids=sids)
Retrieve Equity objects for a list of sids. Users generally shouldn't need to use this method (instead, they should prefer the more general/friendly `retrieve_assets`), but it has a documented interface and tests because it's used upstream. Parameters ---------- sids : iterable[string] Returns ------- equities : dict[str -> Equity] Raises ------ EquitiesNotFound When any requested asset isn't found.
def Readdir(self, path, fh=None): """Reads a directory given by path. Args: path: The path to list children of. fh: A file handler. Not used. Yields: A generator of filenames. Raises: FuseOSError: If we try and list a file. """ del fh # We can't read a path if it's a file. if not self._IsDir(path): raise fuse.FuseOSError(errno.ENOTDIR) fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token) children = fd.ListChildren() # Make these special directories unicode to be consistent with the rest of # aff4. for directory in [u".", u".."]: yield directory # ListChildren returns a generator, so we do the same. for child in children: # Filter out any directories we've chosen to ignore. if child.Path() not in self.ignored_dirs: yield child.Basename()
Reads a directory given by path. Args: path: The path to list children of. fh: A file handler. Not used. Yields: A generator of filenames. Raises: FuseOSError: If we try and list a file.
def from_json(cls, json_doc):
    """
    Create and return a new Session Token based on the contents
    of a JSON document.

    :type json_doc: str
    :param json_doc: A string containing a JSON document with a
        previously saved Credentials object.
    """
    d = json.loads(json_doc)
    token = cls()
    token.__dict__.update(d)
    return token
Create and return a new Session Token based on the contents of a JSON document. :type json_doc: str :param json_doc: A string containing a JSON document with a previously saved Credentials object.
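A hedged usage sketch of from_json() above; the class name SessionToken and the field names in the JSON document are assumptions made purely for illustration.

import json

doc = json.dumps({"access_key": "EXAMPLE-KEY", "secret_key": "EXAMPLE-SECRET"})
token = SessionToken.from_json(doc)   # assuming from_json() is a classmethod of SessionToken
print(token.access_key)               # attributes come straight from the JSON keys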
def is_exported(bundle): """ Returns True if dataset is already exported to CKAN. Otherwise returns False. """ if not ckan: raise EnvironmentError(MISSING_CREDENTIALS_MSG) params = {'q': 'name:{}'.format(bundle.dataset.vid.lower())} resp = ckan.action.package_search(**params) return len(resp['results']) > 0
Returns True if dataset is already exported to CKAN. Otherwise returns False.
def find_cell_end(self, lines): """Return position of end of cell marker, and position of first line after cell""" if self.in_region: self.cell_type = 'markdown' for i, line in enumerate(lines): if self.end_region_re.match(line): return i, i + 1, True elif self.metadata is None: # default markdown: (last) two consecutive blank lines, except when in code blocks self.cell_type = 'markdown' prev_blank = 0 in_explicit_code_block = False in_indented_code_block = False for i, line in enumerate(lines): if in_explicit_code_block and self.end_code_re.match(line): in_explicit_code_block = False continue if self.non_jupyter_code_re and self.non_jupyter_code_re.match(line): in_explicit_code_block = True prev_blank = 0 continue if prev_blank and line.startswith(' ') and not _BLANK_LINE.match(line): in_indented_code_block = True prev_blank = 0 continue if in_indented_code_block and not _BLANK_LINE.match(line) and not line.startswith(' '): in_indented_code_block = False if in_indented_code_block or in_explicit_code_block: continue if self.start_code_re.match(line) or self.start_region_re.match(line): if i > 1 and prev_blank: return i - 1, i, False return i, i, False if self.split_at_heading and line.startswith('#') and prev_blank >= 1: return i - 1, i, False if _BLANK_LINE.match(lines[i]): prev_blank += 1 elif i > 2 and prev_blank >= 2: return i - 2, i, True else: prev_blank = 0 else: self.cell_type = 'code' for i, line in enumerate(lines): # skip cell header if i == 0: continue if self.end_code_re.match(line): return i, i + 1, True # End not found return len(lines), len(lines), False
Return position of end of cell marker, and position of first line after cell
def anneal(self, mode, matches, orig_matches): """ Perform post-processing. Return True when any changes were applied. """ changed = False def dupes_in_matches(): """Generator for index of matches that are dupes.""" items_by_path = config.engine.group_by('realpath') hashes = set([x.hash for x in matches]) for idx, item in enumerate(matches): same_path_but_not_in_matches = any( x.hash not in hashes for x in items_by_path.get(item.realpath, []) ) if item.realpath and same_path_but_not_in_matches: yield idx if mode == 'dupes+': items_by_path = config.engine.group_by('realpath') hashes = set([x.hash for x in matches]) dupes = [] for item in matches: if item.realpath: # Add all items with the same path that are missing for dupe in items_by_path.get(item.realpath, []): if dupe.hash not in hashes: changed = True dupes.append(dupe) hashes.add(dupe.hash) matches.extend(dupes) elif mode == 'dupes-': for idx in reversed(list(dupes_in_matches())): changed = True del matches[idx] elif mode == 'dupes=': items_by_path = config.engine.group_by('realpath') dupes = list(i for i in matches if i.realpath and len(items_by_path.get(i.realpath, [])) > 1) if len(dupes) != len(matches): changed = True matches[:] = dupes elif mode == 'invert': hashes = set([x.hash for x in matches]) changed = True matches[:] = list(i for i in orig_matches if i.hash not in hashes) elif mode == 'unique': seen, dupes = set(), [] for i, item in enumerate(matches): if item.name in seen: changed = True dupes.append(i) seen.add(item.name) for i in reversed(dupes): del matches[i] else: raise RuntimeError('Internal Error: Unknown anneal mode ' + mode) return changed
Perform post-processing. Return True when any changes were applied.
def _fix_quantities(tree):
    '''
    Stupidly simple function to fix any Items/Quantity disparities inside a
    DistributionConfig block before use. Since AWS only accepts JSON-encodable
    data types, this implementation is "good enough" for our purposes.
    '''
    if isinstance(tree, dict):
        tree = {k: _fix_quantities(v) for k, v in tree.items()}
        if isinstance(tree.get('Items'), list):
            tree['Quantity'] = len(tree['Items'])
            if not tree['Items']:
                tree.pop('Items')  # Silly, but AWS requires it....
        return tree
    elif isinstance(tree, list):
        return [_fix_quantities(t) for t in tree]
    else:
        return tree
Stupidly simple function to fix any Items/Quantity disparities inside a DistributionConfig block before use. Since AWS only accepts JSON-encodable data types, this implementation is "good enough" for our purposes.
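An illustrative call to _fix_quantities() above; the DistributionConfig fragment is invented for the example.

cfg = {'Aliases': {'Items': ['example.com', 'www.example.com']},
       'Origins': {'Items': []}}
print(_fix_quantities(cfg))
# {'Aliases': {'Items': ['example.com', 'www.example.com'], 'Quantity': 2},
#  'Origins': {'Quantity': 0}}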
def _set_fcoe_fcf_map(self, v, load=False): """ Setter method for fcoe_fcf_map, mapped from YANG variable /fcoe/fcoe_fabric_map/fcoe_fcf_map (list) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_fcf_map is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_fcf_map() directly. YANG Description: The list of FCF Groups. Each row contains the FCF group name, member FCoE map, FCF rbid and FDF rbids """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("fcf_map_name",fcoe_fcf_map.fcoe_fcf_map, yang_name="fcoe-fcf-map", rest_name="fcf-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcf-map-name', extensions={u'tailf-common': {u'callpoint': u'fcf_map_cp', u'cli-suppress-list-no': None, u'alt-name': u'fcf-group', u'info': u'Configure an FCF Group', u'cli-mode-name': u'config-fabric-map-fcf-group-$(fcf-map-name)'}}), is_container='list', yang_name="fcoe-fcf-map", rest_name="fcf-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'fcf_map_cp', u'cli-suppress-list-no': None, u'alt-name': u'fcf-group', u'info': u'Configure an FCF Group', u'cli-mode-name': u'config-fabric-map-fcf-group-$(fcf-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """fcoe_fcf_map must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("fcf_map_name",fcoe_fcf_map.fcoe_fcf_map, yang_name="fcoe-fcf-map", rest_name="fcf-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcf-map-name', extensions={u'tailf-common': {u'callpoint': u'fcf_map_cp', u'cli-suppress-list-no': None, u'alt-name': u'fcf-group', u'info': u'Configure an FCF Group', u'cli-mode-name': u'config-fabric-map-fcf-group-$(fcf-map-name)'}}), is_container='list', yang_name="fcoe-fcf-map", rest_name="fcf-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'fcf_map_cp', u'cli-suppress-list-no': None, u'alt-name': u'fcf-group', u'info': u'Configure an FCF Group', u'cli-mode-name': u'config-fabric-map-fcf-group-$(fcf-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True)""", }) self.__fcoe_fcf_map = t if hasattr(self, '_set'): self._set()
Setter method for fcoe_fcf_map, mapped from YANG variable /fcoe/fcoe_fabric_map/fcoe_fcf_map (list) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_fcf_map is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_fcf_map() directly. YANG Description: The list of FCF Groups. Each row contains the FCF group name, member FCoE map, FCF rbid and FDF rbids
def resolve_dst(self, dst_dir, src):
    """
    Finds the destination path for a source file.

    If the source is an absolute path (so there is no pattern to preserve),
    the file is copied directly into the base dst_dir.
    """
    if os.path.isabs(src):
        return os.path.join(dst_dir, os.path.basename(src))
    return os.path.join(dst_dir, src)
Finds the destination path for a source file. If the source is an absolute path (so there is no pattern to preserve), the file is copied directly into the base dst_dir.
def get_activity_admin_session_for_objective_bank(self, objective_bank_id=None): """Gets the OsidSession associated with the activity admin service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivityAdminSession) - an ActivityAdminSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_admin() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_admin() and supports_visible_federation() are true. """ if not objective_bank_id: raise NullArgument if not self.supports_activity_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ActivityAdminSession(objective_bank_id, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
Gets the OsidSession associated with the activity admin service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivityAdminSession) - an ActivityAdminSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_admin() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_admin() and supports_visible_federation() are true.
def _make_parent(self): """Creates a parent key for the current path. Extracts all but the last element in the key path and creates a new key, while still matching the namespace and the project. :rtype: :class:`google.cloud.datastore.key.Key` or :class:`NoneType` :returns: A new ``Key`` instance, whose path consists of all but the last element of current path. If the current key has only one path element, returns ``None``. """ if self.is_partial: parent_args = self.flat_path[:-1] else: parent_args = self.flat_path[:-2] if parent_args: return self.__class__( *parent_args, project=self.project, namespace=self.namespace )
Creates a parent key for the current path. Extracts all but the last element in the key path and creates a new key, while still matching the namespace and the project. :rtype: :class:`google.cloud.datastore.key.Key` or :class:`NoneType` :returns: A new ``Key`` instance, whose path consists of all but the last element of current path. If the current key has only one path element, returns ``None``.
def merge_with(self, another, ubound=None, top_id=None): """ This method merges a tree of the current :class:`ITotalizer` object, with a tree of another object and (if needed) increases a potential upper bound that can be imposed on the complete list of literals in the sum of an existing :class:`ITotalizer` object to a new value. :param another: another totalizer to merge with. :param ubound: a new upper bound. :param top_id: a new top variable identifier. :type another: :class:`ITotalizer` :type ubound: int :type top_id: integer or None The top identifier ``top_id`` applied only if it is greater than the one used in ``self``. This method creates additional clauses encoding the existing totalizer tree merged with another totalizer tree into *one* sum and updating the upper bound. As a result, it appends the new clauses to the list of clauses of :class:`.CNF` ``self.cnf``. The number of newly created clauses is stored in variable ``self.nof_new``. Also, if the upper bound is updated, a list of bounds ``self.rhs`` gets increased and its length becomes ``ubound+1``. Otherwise, it is updated with new values. The method can be used in the following way: .. code-block:: python >>> from pysat.card import ITotalizer >>> with ITotalizer(lits=[1, 2], ubound=1) as t1: ... print t1.cnf.clauses [[-2, 3], [-1, 3], [-1, -2, 4]] ... print t1.rhs [3, 4] ... ... t2 = ITotalizer(lits=[5, 6], ubound=1) ... print t1.cnf.clauses [[-6, 7], [-5, 7], [-5, -6, 8]] ... print t1.rhs [7, 8] ... ... t1.merge_with(t2) ... print t1.cnf.clauses [[-2, 3], [-1, 3], [-1, -2, 4], [-6, 7], [-5, 7], [-5, -6, 8], [-7, 9], [-8, 10], [-3, 9], [-4, 10], [-3, -7, 10]] ... print t1.cnf.clauses[-t1.nof_new:] [[-6, 7], [-5, 7], [-5, -6, 8], [-7, 9], [-8, 10], [-3, 9], [-4, 10], [-3, -7, 10]] ... print t1.rhs [9, 10] ... ... t2.delete() """ self.top_id = max(self.top_id, top_id if top_id != None else 0, another.top_id) self.ubound = max(self.ubound, ubound if ubound != None else 0, another.ubound) # extending the list of input literals self.lits.extend(another.lits) # saving default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) # updating the object and adding more variables and clauses self.tobj, clauses, self.rhs, self.top_id = pycard.itot_mrg(self.tobj, another.tobj, self.ubound, self.top_id) # recovering default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) # saving the result self.cnf.clauses.extend(another.cnf.clauses) self.cnf.clauses.extend(clauses) self.cnf.nv = self.top_id # for convenience, keeping the number of new clauses self.nof_new = len(another.cnf.clauses) + len(clauses) # memory deallocation should not be done for the merged tree another._merged = True
This method merges a tree of the current :class:`ITotalizer` object, with a tree of another object and (if needed) increases a potential upper bound that can be imposed on the complete list of literals in the sum of an existing :class:`ITotalizer` object to a new value. :param another: another totalizer to merge with. :param ubound: a new upper bound. :param top_id: a new top variable identifier. :type another: :class:`ITotalizer` :type ubound: int :type top_id: integer or None The top identifier ``top_id`` applied only if it is greater than the one used in ``self``. This method creates additional clauses encoding the existing totalizer tree merged with another totalizer tree into *one* sum and updating the upper bound. As a result, it appends the new clauses to the list of clauses of :class:`.CNF` ``self.cnf``. The number of newly created clauses is stored in variable ``self.nof_new``. Also, if the upper bound is updated, a list of bounds ``self.rhs`` gets increased and its length becomes ``ubound+1``. Otherwise, it is updated with new values. The method can be used in the following way: .. code-block:: python >>> from pysat.card import ITotalizer >>> with ITotalizer(lits=[1, 2], ubound=1) as t1: ... print t1.cnf.clauses [[-2, 3], [-1, 3], [-1, -2, 4]] ... print t1.rhs [3, 4] ... ... t2 = ITotalizer(lits=[5, 6], ubound=1) ... print t1.cnf.clauses [[-6, 7], [-5, 7], [-5, -6, 8]] ... print t1.rhs [7, 8] ... ... t1.merge_with(t2) ... print t1.cnf.clauses [[-2, 3], [-1, 3], [-1, -2, 4], [-6, 7], [-5, 7], [-5, -6, 8], [-7, 9], [-8, 10], [-3, 9], [-4, 10], [-3, -7, 10]] ... print t1.cnf.clauses[-t1.nof_new:] [[-6, 7], [-5, 7], [-5, -6, 8], [-7, 9], [-8, 10], [-3, 9], [-4, 10], [-3, -7, 10]] ... print t1.rhs [9, 10] ... ... t2.delete()
def _prep_datum(self, datum, dialect, col, needs_conversion): """Puts a value in proper format for a SQL string""" if datum is None or (needs_conversion and not str(datum).strip()): return 'NULL' pytype = self.columns[col]['pytype'] if needs_conversion: if pytype == datetime.datetime: datum = dateutil.parser.parse(datum) elif pytype == bool: datum = th.coerce_to_specific(datum) if dialect.startswith('sqlite'): datum = 1 if datum else 0 else: datum = pytype(str(datum)) if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date): if dialect in self._datetime_format: return datum.strftime(self._datetime_format[dialect]) else: return "'%s'" % datum elif hasattr(datum, 'lower'): # simple SQL injection protection, sort of... ? return "'%s'" % datum.replace("'", "''") else: return datum
Puts a value in proper format for a SQL string
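A standalone sketch of the string-quoting step inside _prep_datum() above; doubling single quotes is the only escaping applied, as the original comment notes.

def quote_sql_string(datum):
    # Mirror the final branch of _prep_datum(): wrap in quotes and double any
    # embedded single quotes.
    return "'%s'" % str(datum).replace("'", "''")

print(quote_sql_string("O'Brien"))   # 'O''Brien'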
def add_graph(patterns, G):
    """Add a graph to a set of unique patterns."""
    if not patterns:
        patterns.append([G])
        return
    for i, graphs in enumerate(patterns):
        if networkx.is_isomorphic(graphs[0], G, node_match=type_match,
                                  edge_match=type_match):
            patterns[i].append(G)
            return
    patterns.append([G])
Add a graph to a set of unique patterns.
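A small illustration of how add_graph() above groups graphs by isomorphism, assuming add_graph, networkx and a trivial stand-in for the original type_match matcher are available in the same module.

import networkx

type_match = lambda a, b: a == b          # stand-in for the original attribute matcher

patterns = []
g1 = networkx.path_graph(3)
g2 = networkx.path_graph(3)               # isomorphic to g1, joins its group
g3 = networkx.complete_graph(3)           # not isomorphic to a 3-node path
for g in (g1, g2, g3):
    add_graph(patterns, g)
print([len(group) for group in patterns])  # [2, 1]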
def exists(self):
    """ Checks if item already exists in database """
    self_object = self.query.filter_by(id=self.id).first()
    return self_object is not None
Checks if item already exists in database
def _get_size(size, size_max, size_min, default_max, default_min):
    """
    Helper method for providing a size, or a range to randomize from
    """
    if len(default_max) != len(default_min):
        raise ValueError('default_max = {} and default_min = {}'
                         .format(str(default_max), str(default_min)) +
                         ' have different lengths')
    if size is not None:
        if (size_max is not None) or (size_min is not None):
            raise ValueError('size = {} overrides size_max = {}, size_min = {}'
                             .format(size, size_max, size_min))
    else:
        if size_max is None:
            size_max = default_max
        if size_min is None:
            size_min = default_min
        size = np.array([np.random.uniform(size_min[i], size_max[i])
                         for i in range(len(default_max))])
    return size
Helper method for providing a size, or a range to randomize from
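A hedged usage sketch of _get_size() above, assuming numpy is available as np; the bounds are illustrative.

import numpy as np

# A fixed size is returned unchanged (size_max/size_min must stay None).
print(_get_size([1.0, 2.0], None, None, [3.0, 3.0], [0.5, 0.5]))

# With no size given, each component is drawn uniformly from [size_min, size_max].
print(_get_size(None, [2.0, 2.0], [1.0, 1.0], [3.0, 3.0], [0.5, 0.5]))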
def fit(self, X, y=None, **kwargs): """ Fits n KMeans models where n is the length of ``self.k_values_``, storing the silhouette scores in the ``self.k_scores_`` attribute. The "elbow" and silhouette score corresponding to it are stored in ``self.elbow_value`` and ``self.elbow_score`` respectively. This method finishes up by calling draw to create the plot. """ self.k_scores_ = [] self.k_timers_ = [] if self.locate_elbow: self.elbow_value_ = None self.elbow_score_ = None for k in self.k_values_: # Compute the start time for each model start = time.time() # Set the k value and fit the model self.estimator.set_params(n_clusters=k) self.estimator.fit(X) # Append the time and score to our plottable metrics self.k_timers_.append(time.time() - start) self.k_scores_.append( self.scoring_metric(X, self.estimator.labels_) ) if self.locate_elbow: locator_kwargs = { 'distortion': {'curve_nature': 'convex', 'curve_direction': 'decreasing'}, 'silhouette': {'curve_nature': 'concave', 'curve_direction': 'increasing'}, 'calinski_harabaz': {'curve_nature': 'concave', 'curve_direction': 'increasing'}, }.get(self.metric, {}) elbow_locator = KneeLocator(self.k_values_,self.k_scores_,**locator_kwargs) self.elbow_value_ = elbow_locator.knee if self.elbow_value_ == None: warning_message=\ "No 'knee' or 'elbow' point detected, " \ "pass `locate_elbow=False` to remove the warning" warnings.warn(warning_message,YellowbrickWarning) else: self.elbow_score_ = self.k_scores_[self.k_values_.index(self.elbow_value_)] self.draw() return self
Fits n KMeans models where n is the length of ``self.k_values_``, storing the silhouette scores in the ``self.k_scores_`` attribute. The "elbow" and silhouette score corresponding to it are stored in ``self.elbow_value`` and ``self.elbow_score`` respectively. This method finishes up by calling draw to create the plot.
def to_string(cls, error_code): """Returns the string message for the given error code. Args: cls (JLinkRTTErrors): the ``JLinkRTTErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid. """ if error_code == cls.RTT_ERROR_CONTROL_BLOCK_NOT_FOUND: return 'The RTT Control Block has not yet been found (wait?)' return super(JLinkRTTErrors, cls).to_string(error_code)
Returns the string message for the given error code. Args: cls (JLinkRTTErrors): the ``JLinkRTTErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
def _prep_cnv_file(cns_file, svcaller, work_dir, data): """Create a CSV file of CNV calls with log2 and number of marks. """ in_file = cns_file out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0], svcaller)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: reader = csv.reader(in_handle, dialect="excel-tab") writer = csv.writer(out_handle) writer.writerow(["chrom", "start", "end", "num.mark", "seg.mean"]) header = next(reader) for line in reader: cur = dict(zip(header, line)) if chromhacks.is_autosomal(cur["chromosome"]): writer.writerow([_to_ucsc_style(cur["chromosome"]), cur["start"], cur["end"], cur["probes"], cur["log2"]]) return out_file
Create a CSV file of CNV calls with log2 and number of marks.
def information_coefficient(total1, total2, intersect):
    '''a simple Jaccard (information coefficient) to compare two lists of overlaps/diffs
    '''
    total = total1 + total2
    return 2.0 * len(intersect) / total
a simple Jaccard (information coefficient) to compare two lists of overlaps/diffs
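A worked example of information_coefficient() above on two small, invented sets: the score is 2·|intersection| divided by the combined size of both lists.

a = {'x', 'y', 'z'}
b = {'y', 'z', 'w'}
shared = a & b                                            # {'y', 'z'}
print(information_coefficient(len(a), len(b), shared))    # 2 * 2 / 6 ≈ 0.667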
def _submit(self): '''submit a uservoice ticket. When we get here we should have: {'user_prompt_issue': 'I want to do the thing.', 'record_asciinema': '/tmp/helpme.93o__nt5.json', 'record_environment': ((1,1),(2,2)...(N,N))} Required Client Variables self.api_key self.api_secret self.subdomain self.email ''' # Step 0: Authenticate with uservoice API self.authenticate() title = "HelpMe UserVoice Ticket: %s" %(self.run_id) body = self.data['user_prompt_issue'] # Step 1: Environment envars = self.data.get('record_environment') if envars not in [None, '', []]: body += '\n\nEnvironment:\n' for envar in envars: body += ' - %s: %s\n' %(envar[0], envar[1]) # Step 2: Asciinema asciinema = self.data.get('record_asciinema') if asciinema not in [None, '']: url = upload_asciinema(asciinema) # If the upload is successful, add a link to it. if url is not None: body += "\n\nAsciinema Recording: %s" %url # Add other metadata about client body += "\ngenerated by HelpMe: https://vsoch.github.io/helpme/" # Submit the ticket! self.post_ticket(title, body)
submit a uservoice ticket. When we get here we should have: {'user_prompt_issue': 'I want to do the thing.', 'record_asciinema': '/tmp/helpme.93o__nt5.json', 'record_environment': ((1,1),(2,2)...(N,N))} Required Client Variables self.api_key self.api_secret self.subdomain self.email
async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
Stop heartbeat.
def data_directory(self): """ The absolute pathname of the directory where pip-accel's data files are stored (a string). - Environment variable: ``$PIP_ACCEL_CACHE`` - Configuration option: ``data-directory`` - Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise """ return expand_path(self.get(property_name='data_directory', environment_variable='PIP_ACCEL_CACHE', configuration_option='data-directory', default='/var/cache/pip-accel' if is_root() else '~/.pip-accel'))
The absolute pathname of the directory where pip-accel's data files are stored (a string). - Environment variable: ``$PIP_ACCEL_CACHE`` - Configuration option: ``data-directory`` - Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
def add_track(self, *args, **kwargs): """ Add a track to a position. Parameters ---------- track_type: string The type of track to add (e.g. "heatmap", "line") position: string One of 'top', 'bottom', 'center', 'left', 'right' tileset: hgflask.tilesets.Tileset The tileset to be plotted in this track server: string The server serving this track height: int The height of the track, if it is a top, bottom or a center track width: int The width of the track, if it is a left, right or a center track """ new_track = Track(*args, **kwargs) self.tracks = self.tracks + [new_track]
Add a track to a position. Parameters ---------- track_type: string The type of track to add (e.g. "heatmap", "line") position: string One of 'top', 'bottom', 'center', 'left', 'right' tileset: hgflask.tilesets.Tileset The tileset to be plotted in this track server: string The server serving this track height: int The height of the track, if it is a top, bottom or a center track width: int The width of the track, if it is a left, right or a center track
def makedoetree(ddict, bdict): """makedoetree""" dlist = list(ddict.keys()) blist = list(bdict.keys()) dlist.sort() blist.sort() #make space dict doesnot = 'DOES NOT' lst = [] for num in range(0, len(blist)): if bdict[blist[num]] == doesnot:#belong lst = lst + [blist[num]] doedict = {} for num in range(0, len(lst)): #print lst[num] doedict[lst[num]] = {} lv1list = list(doedict.keys()) lv1list.sort() #make wall dict #for each space for i in range(0, len(lv1list)): walllist = [] adict = doedict[lv1list[i]] #loop thru the entire blist dictonary and list the ones that belong into walllist for num in range(0, len(blist)): if bdict[blist[num]] == lv1list[i]: walllist = walllist + [blist[num]] #put walllist into dict for j in range(0, len(walllist)): adict[walllist[j]] = {} #make window dict #for each space for i in range(0, len(lv1list)): adict1 = doedict[lv1list[i]] #for each wall walllist = list(adict1.keys()) walllist.sort() for j in range(0, len(walllist)): windlist = [] adict2 = adict1[walllist[j]] #loop thru the entire blist dictonary and list the ones that belong into windlist for num in range(0, len(blist)): if bdict[blist[num]] == walllist[j]: windlist = windlist + [blist[num]] #put walllist into dict for k in range(0, len(windlist)): adict2[windlist[k]] = {} return doedict
makedoetree
def take_along_axis(large_array, indexes):
    """ Take along axis """
    # Reshape indexes into the right shape
    if len(large_array.shape) > len(indexes.shape):
        indexes = indexes.reshape(
            indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape))))
    return np.take_along_axis(large_array, indexes, axis=0)
Take along axis
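An illustrative call to take_along_axis() above: pick, per column, the row index given in indexes from a small invented array (numpy assumed as np).

import numpy as np

large_array = np.array([[10, 20], [30, 40], [50, 60]])   # shape (3, 2)
indexes = np.array([[2, 0]])                              # row 2 for column 0, row 0 for column 1
print(take_along_axis(large_array, indexes))              # [[50 20]]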
def reshape(self, shape: tf.TensorShape) -> 'TensorFluent': '''Returns a TensorFluent for the reshape operation with given `shape`. Args: shape: The output's shape. Returns: A TensorFluent wrapping the reshape operation. ''' t = tf.reshape(self.tensor, shape) scope = self.scope.as_list() batch = self.batch return TensorFluent(t, scope, batch=batch)
Returns a TensorFluent for the reshape operation with given `shape`. Args: shape: The output's shape. Returns: A TensorFluent wrapping the reshape operation.
def _candidate_merges(self, f): """ Identifies those features that originally had the same ID as `f` (according to the id_spec), but were modified because of duplicate IDs. """ candidates = [self._get_feature(f.id)] c = self.conn.cursor() results = c.execute( constants._SELECT + ''' JOIN duplicates ON duplicates.newid = features.id WHERE duplicates.idspecid = ?''', (f.id,) ) for i in results: candidates.append( feature.Feature(dialect=self.iterator.dialect, **i)) return list(set(candidates))
Identifies those features that originally had the same ID as `f` (according to the id_spec), but were modified because of duplicate IDs.
def get_displays_params(self) -> str: '''Show displays parameters.''' output, error = self._execute( '-s', self.device_sn, 'shell', 'dumpsys', 'window', 'displays') return output
Show displays parameters.
def make_PCEExtension_for_prebuilding_Code( name, Code, prebuild_sources, srcdir, downloads=None, **kwargs): """ If subclass of codeexport.Generic_Code needs to have some of it sources compiled to objects and cached in a `prebuilt/` directory at invocation of `setup.py build_ext` this convenience function makes setting up a PCEExtension easier. Use together with cmdclass = {'build_ext': pce_build_ext}. files called ".metadata*" will be added to dist_files """ import glob from .dist import PCEExtension build_files = [] dist_files = [(os.path.join(srcdir, x[0]), x[1]) for x in getattr(Code, 'dist_files', [])] for attr in ('build_files', 'templates'): for cf in getattr(Code, attr, []) or []: if not cf.startswith('prebuilt'): build_files.append(os.path.join(srcdir, cf)) dist_files.append((os.path.join(srcdir, cf), None)) def prebuilder(build_temp, ext_fullpath, ext, src_paths, **prebuilder_kwargs): build_temp = os.path.abspath(build_temp) if not os.path.isdir(build_temp): make_dirs(build_temp) if downloads: websrc, src_md5 = downloads download_dir = os.path.join(build_temp, srcdir) if not os.path.isdir(download_dir): make_dirs(download_dir) download_files(websrc, src_md5.keys(), src_md5, cwd=download_dir, logger=ext.logger) for p in src_paths: if p not in build_files: copy(os.path.join(srcdir, p), os.path.join(build_temp, srcdir), dest_is_dir=True, create_dest_dirs=True, only_update=ext.only_update, logger=ext.logger) dst = os.path.abspath(os.path.join( os.path.dirname(ext_fullpath), 'prebuilt/')) make_dirs(dst, logger=ext.logger) objs = compile_sources( [os.path.join(srcdir, x) for x in src_paths], destdir=dst, cwd=build_temp, metadir=dst, only_update=True, logger=ext.logger, **prebuilder_kwargs) glb = os.path.join(ext_fullpath, '.metadata*') dist_files.extend(glob.glob(glb)) for obj in objs: # Copy prebuilt objects into lib for distriubtion copy(os.path.join(build_temp, obj), dst, dest_is_dir=True, create_dest_dirs=True, only_update=ext.only_update, logger=ext.logger) return objs compile_kwargs = Code.compile_kwargs.copy() logger = kwargs.pop('logger', True) compile_kwargs.update(kwargs) return PCEExtension( name, [], build_files=build_files, dist_files=dist_files, build_callbacks=[ ( prebuilder, (prebuild_sources,), compile_kwargs ), ], logger=logger, link_ext=False )
If subclass of codeexport.Generic_Code needs to have some of it sources compiled to objects and cached in a `prebuilt/` directory at invocation of `setup.py build_ext` this convenience function makes setting up a PCEExtension easier. Use together with cmdclass = {'build_ext': pce_build_ext}. files called ".metadata*" will be added to dist_files
def node(self, name): """Gets a single node from PuppetDB. :param name: The name of the node search. :type name: :obj:`string` :return: An instance of Node :rtype: :class:`pypuppetdb.types.Node` """ nodes = self.nodes(path=name) return next(node for node in nodes)
Gets a single node from PuppetDB. :param name: The name of the node to search for. :type name: :obj:`string` :return: An instance of Node :rtype: :class:`pypuppetdb.types.Node`
def cli(conf): """The fedora-messaging command line interface.""" if conf: if not os.path.isfile(conf): raise click.exceptions.BadParameter("{} is not a file".format(conf)) try: config.conf.load_config(config_path=conf) except exceptions.ConfigurationException as e: raise click.exceptions.BadParameter(str(e)) twisted_observer = legacy_twisted_log.PythonLoggingObserver() twisted_observer.start() config.conf.setup_logging()
The fedora-messaging command line interface.
def display_waypoints(self): '''display the waypoints''' from MAVProxy.modules.mavproxy_map import mp_slipmap self.mission_list = self.module('wp').wploader.view_list() polygons = self.module('wp').wploader.polygon_list() self.map.add_object(mp_slipmap.SlipClearLayer('Mission')) for i in range(len(polygons)): p = polygons[i] if len(p) > 1: items = [MPMenuItem('Set', returnkey='popupMissionSet'), MPMenuItem('WP Remove', returnkey='popupMissionRemove'), MPMenuItem('WP Move', returnkey='popupMissionMove'), MPMenuItem('Remove NoFly', returnkey='popupMissionRemoveNoFly'), ] popup = MPMenuSubMenu('Popup', items) self.map.add_object(mp_slipmap.SlipPolygon('mission %u' % i, p, layer='Mission', linewidth=2, colour=(255,255,255), arrow = self.map_settings.showdirection, popup_menu=popup)) labeled_wps = {} self.map.add_object(mp_slipmap.SlipClearLayer('LoiterCircles')) for i in range(len(self.mission_list)): next_list = self.mission_list[i] for j in range(len(next_list)): #label already printed for this wp? if (next_list[j] not in labeled_wps): label = self.label_for_waypoint(next_list[j]) colour = self.colour_for_wp(next_list[j]) self.map.add_object(mp_slipmap.SlipLabel( 'miss_cmd %u/%u' % (i,j), polygons[i][j], label, 'Mission', colour=colour)) if (self.map_settings.loitercircle and self.module('wp').wploader.wp_is_loiter(next_list[j])): wp = self.module('wp').wploader.wp(next_list[j]) if wp.command != mavutil.mavlink.MAV_CMD_NAV_LOITER_TO_ALT and wp.param3 != 0: # wp radius and direction is defined by the mission loiter_rad = wp.param3 elif wp.command == mavutil.mavlink.MAV_CMD_NAV_LOITER_TO_ALT and wp.param2 != 0: # wp radius and direction is defined by the mission loiter_rad = wp.param2 else: # wp radius and direction is defined by the parameter loiter_rad = self.get_mav_param('WP_LOITER_RAD') self.map.add_object(mp_slipmap.SlipCircle('Loiter Circle %u' % (next_list[j] + 1), 'LoiterCircles', polygons[i][j], loiter_rad, (255, 255, 255), 2, arrow = self.map_settings.showdirection)) labeled_wps[next_list[j]] = (i,j)
display the waypoints
def _canonicalize(self, filename):
    """Use .collection as extension unless provided"""
    path, ext = os.path.splitext(filename)
    if not ext:
        ext = ".collection"
    return path + ext
Use .collection as extension unless provided
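A quick check of the behaviour of _canonicalize() above, restated here as a plain function for illustration.

import os

def canonicalize(filename):
    path, ext = os.path.splitext(filename)
    return path + (ext or ".collection")   # keep an existing extension, else default

print(canonicalize("mydata"))        # mydata.collection
print(canonicalize("mydata.json"))   # mydata.json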
def resource(self, uri, methods=frozenset({'GET'}), host=None, strict_slashes=None, stream=False, version=None, name=None, **kwargs): """ Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource. """ if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): self.resources.append(( FutureRoute(handler, uri, methods, host, strict_slashes, stream, version, name), kwargs)) return handler return decorator
Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource.
def add_to_item_list(self, item_urls, item_list_url): """ Instruct the server to add the given items to the specified Item List :type item_urls: List or ItemGroup :param item_urls: List of URLs for the items to add, or an ItemGroup object :type item_list_url: String or ItemList :param item_list_url: the URL of the list to which to add the items, or an ItemList object :rtype: String :returns: the server success message, if successful :raises: APIError if the request was not successful """ item_list_url = str(item_list_url) name = self.get_item_list(item_list_url).name() return self.add_to_item_list_by_name(item_urls, name)
Instruct the server to add the given items to the specified Item List :type item_urls: List or ItemGroup :param item_urls: List of URLs for the items to add, or an ItemGroup object :type item_list_url: String or ItemList :param item_list_url: the URL of the list to which to add the items, or an ItemList object :rtype: String :returns: the server success message, if successful :raises: APIError if the request was not successful
def do_handle_log(self, workunit, level, *msg_elements): """Implementation of Reporter callback.""" entry_info = { 'level': self._log_level_str[level], 'messages': self._render_messages(*msg_elements), } root_id = str(workunit.root().id) current_stack = self._root_id_to_workunit_stack[root_id] if current_stack: current_stack[-1]['log_entries'].append(entry_info) else: self.results[root_id]['log_entries'].append(entry_info)
Implementation of Reporter callback.
def get_allowed_methods(self):
        """Returns a comma-separated list of method names that are allowed on
        this instance. Useful to set the ``Allowed`` response header.
        """
        return ", ".join([method for method in dir(self)
                          if method.upper() == method
                          and callable(getattr(self, method))])
Returns a comma-separated list of method names that are allowed on this instance. Useful to set the ``Allowed`` response header.
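A self-contained sketch of the same introspection rule: any attribute whose name is entirely upper-case and is callable is reported as an allowed method.

class Resource:
    def GET(self):
        return "ok"

    def POST(self, body):
        return "created"

    def helper(self):
        # lower-case, so it is not reported
        pass

def allowed_methods(obj):
    return ", ".join(name for name in dir(obj)
                     if name.upper() == name and callable(getattr(obj, name)))

# allowed_methods(Resource())  ->  "GET, POST"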
def sunset_utc(self, date, latitude, longitude, observer_elevation=0): """Calculate sunset time in the UTC timezone. :param date: Date to calculate for. :type date: :class:`datetime.date` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :param observer_elevation: Elevation in metres to calculate sunset for :type observer_elevation: int :return: The UTC date and time at which sunset occurs. :rtype: :class:`~datetime.datetime` """ try: return self._calc_time(90 + 0.833, SUN_SETTING, date, latitude, longitude, observer_elevation) except ValueError as exc: if exc.args[0] == "math domain error": raise AstralError( ("Sun never reaches the horizon on this day, " "at this location.") ) else: raise
Calculate sunset time in the UTC timezone. :param date: Date to calculate for. :type date: :class:`datetime.date` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :param observer_elevation: Elevation in metres to calculate sunset for :type observer_elevation: int :return: The UTC date and time at which sunset occurs. :rtype: :class:`~datetime.datetime`
def Call(self, Id=0): """Queries a call object. :Parameters: Id : int Call identifier. :return: Call object. :rtype: `call.Call` """ o = Call(self, Id) o.Status # Test if such a call exists. return o
Queries a call object. :Parameters: Id : int Call identifier. :return: Call object. :rtype: `call.Call`
def get_item_type_id_from_identifier(self, identifier, item_types=None): """ Get an ID of item type for the given identifier. Identifier is a string of the following form: <model_prefix>/<model_identifier> where <model_prefix> is any suffix of database table of the given model which uniquely specifies the table, and <model_identifier> is identifier of the object. Args: identifier (str): item identifier item_types (dict): ID -> item type JSON Returns: int: ID of the corresponding item type """ if item_types is None: item_types = ItemType.objects.get_all_types() identifier_type, _ = identifier.split('/') item_types = [it for it in item_types.values() if it['table'].endswith(identifier_type)] if len(item_types) > 1: raise Exception('There is more than one item type for name "{}".'.format(identifier_type)) if len(item_types) == 0: raise Exception('There is no item type for name "{}".'.format(identifier_type)) return item_types[0]['id']
Get an ID of item type for the given identifier. Identifier is a string of the following form: <model_prefix>/<model_identifier> where <model_prefix> is any suffix of database table of the given model which uniquely specifies the table, and <model_identifier> is identifier of the object. Args: identifier (str): item identifier item_types (dict): ID -> item type JSON Returns: int: ID of the corresponding item type
def default(cls) -> 'PrecalculatedTextMeasurer': """Returns a reasonable default PrecalculatedTextMeasurer.""" if cls._default_cache is not None: return cls._default_cache if pkg_resources.resource_exists(__name__, 'default-widths.json.xz'): import lzma with pkg_resources.resource_stream(__name__, 'default-widths.json.xz') as f: with lzma.open(f, "rt") as g: cls._default_cache = PrecalculatedTextMeasurer.from_json( cast(TextIO, g)) return cls._default_cache elif pkg_resources.resource_exists(__name__, 'default-widths.json'): with pkg_resources.resource_stream(__name__, 'default-widths.json') as f: cls._default_cache = PrecalculatedTextMeasurer.from_json( io.TextIOWrapper(f, encoding='utf-8')) return cls._default_cache else: raise ValueError('could not load default-widths.json')
Returns a reasonable default PrecalculatedTextMeasurer.
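The fallback logic in isolation: try the xz-compressed resource first, then the plain JSON file. The resource names below simply mirror the ones used above; the sketch is illustrative rather than a drop-in replacement.

import io
import json
import lzma
import pkg_resources

def load_widths(package):
    if pkg_resources.resource_exists(package, 'default-widths.json.xz'):
        with pkg_resources.resource_stream(package, 'default-widths.json.xz') as f:
            with lzma.open(f, 'rt') as g:
                return json.load(g)
    if pkg_resources.resource_exists(package, 'default-widths.json'):
        with pkg_resources.resource_stream(package, 'default-widths.json') as f:
            return json.load(io.TextIOWrapper(f, encoding='utf-8'))
    raise ValueError('could not load default-widths.json')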
def get_legacy_storage_path(self): """ Detect and return existing legacy storage path. """ config_dir = os.path.dirname( self.py3_wrapper.config.get("i3status_config_path", "/tmp") ) storage_path = os.path.join(config_dir, "py3status.data") if os.path.exists(storage_path): return storage_path else: return None
Detect and return existing legacy storage path.
def BSearchCeil(a, x, lo=0, hi=None):
    """Returns lowest i such that a[i] >= x, or -1 if x > all elements in a

    So, if x is in between two elements in a, this function will return the
    index of the higher element, hence "Ceil".

    Arguments:
    a -- ordered numeric sequence
    x -- element to search within a
    lo -- lowest index to consider in search
    hi -- highest index to consider in search"""
    if len(a) == 0: return -1
    hi = hi if hi is not None else len(a)
    pos = bisect_left(a, x, lo, hi)
    return pos if pos < hi else -1
Returns lowest i such that a[i] >= x, or -1 if x > all elements in a So, if x is in between two elements in a, this function will return the index of the higher element, hence "Ceil". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search
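A quick usage sketch of the behaviour documented above.

a = [1, 3, 3, 7, 10]
assert BSearchCeil(a, 3) == 1    # exact match: index of the first occurrence
assert BSearchCeil(a, 4) == 3    # between 3 and 7: index of the higher element
assert BSearchCeil(a, 11) == -1  # larger than every element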
def from_computed_structure_entry(entry, miller_index, label=None, adsorbates=None, clean_entry=None, **kwargs): """ Returns SlabEntry from a ComputedStructureEntry """ return SlabEntry(entry.structure, entry.energy, miller_index, label=label, adsorbates=adsorbates, clean_entry=clean_entry, **kwargs)
Returns SlabEntry from a ComputedStructureEntry
def write(self, node, filehandle): """Write JSON to `filehandle` starting at `node`.""" dictexporter = self.dictexporter or DictExporter() data = dictexporter.export(node) return json.dump(data, filehandle, **self.kwargs)
Write JSON to `filehandle` starting at `node`.
def _unpack_oxm_field(self): """Unpack oxm_field from oxm_field_and_mask. Returns: :class:`OxmOfbMatchField`, int: oxm_field from oxm_field_and_mask. Raises: ValueError: If oxm_class is OFPXMC_OPENFLOW_BASIC but :class:`OxmOfbMatchField` has no such integer value. """ field_int = self.oxm_field_and_mask >> 1 # We know that the class below requires a subset of the ofb enum if self.oxm_class == OxmClass.OFPXMC_OPENFLOW_BASIC: return OxmOfbMatchField(field_int) return field_int
Unpack oxm_field from oxm_field_and_mask. Returns: :class:`OxmOfbMatchField`, int: oxm_field from oxm_field_and_mask. Raises: ValueError: If oxm_class is OFPXMC_OPENFLOW_BASIC but :class:`OxmOfbMatchField` has no such integer value.
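For orientation, a minimal sketch of the bit layout being unpacked: the upper seven bits of oxm_field_and_mask carry the field identifier and the lowest bit is the OpenFlow has-mask flag.

def unpack_oxm(oxm_field_and_mask):
    field = oxm_field_and_mask >> 1     # upper 7 bits: field identifier
    has_mask = oxm_field_and_mask & 1   # lowest bit: has-mask flag
    return field, bool(has_mask)

# An arbitrary field id 6 with and without the mask bit set:
assert unpack_oxm((6 << 1) | 1) == (6, True)
assert unpack_oxm(6 << 1) == (6, False)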
def _run_play(self, play): ''' run a list of tasks for a given pattern, in order ''' self.callbacks.on_play_start(play.name) # if no hosts matches this play, drop out if not self.inventory.list_hosts(play.hosts): self.callbacks.on_no_hosts_matched() return True # get facts from system self._do_setup_step(play) # now with that data, handle contentional variable file imports! all_hosts = self._list_available_hosts(play.hosts) play.update_vars_files(all_hosts) serialized_batch = [] if play.serial <= 0: serialized_batch = [all_hosts] else: # do N forks all the way through before moving to next while len(all_hosts) > 0: play_hosts = [] for x in range(play.serial): if len(all_hosts) > 0: play_hosts.append(all_hosts.pop()) serialized_batch.append(play_hosts) for on_hosts in serialized_batch: self.inventory.also_restrict_to(on_hosts) for task in play.tasks(): # only run the task if the requested tags match should_run = False for x in self.only_tags: for y in task.tags: if (x==y): should_run = True break if should_run: if not self._run_task(play, task, False): # whether no hosts matched is fatal or not depends if it was on the initial step. # if we got exactly no hosts on the first step (setup!) then the host group # just didn't match anything and that's ok return False host_list = self._list_available_hosts(play.hosts) # if no hosts remain, drop out if not host_list: self.callbacks.on_no_hosts_remaining() return False # run notify actions for handler in play.handlers(): if len(handler.notified_by) > 0: self.inventory.restrict_to(handler.notified_by) self._run_task(play, handler, True) self.inventory.lift_restriction() handler.notified_by = [] self.inventory.lift_also_restriction() return True
run a list of tasks for a given pattern, in order
def augmentation_transform(self, data, label):  # pylint: disable=arguments-differ
        """Overrides the base method. Transforms input data with the specified augmentations."""
        for aug in self.auglist:
            data, label = aug(data, label)
        return (data, label)
Overrides the base method. Transforms input data with the specified augmentations.
def activate(self, target=None, **options): """Activate DEP communication with a target.""" log.debug("initiator options: {0}".format(options)) self.did = options.get('did', None) self.nad = options.get('nad', None) self.gbi = options.get('gbi', '')[0:48] self.brs = min(max(0, options.get('brs', 2)), 2) self.lri = min(max(0, options.get('lri', 3)), 3) if self._acm is None or 'acm' in options: self._acm = bool(options.get('acm', True)) assert self.did is None or 0 <= self.did <= 255 assert self.nad is None or 0 <= self.nad <= 255 ppi = (self.lri << 4) | (bool(self.gbi) << 1) | int(bool(self.nad)) did = 0 if self.did is None else self.did atr_req = ATR_REQ(os.urandom(10), did, 0, 0, ppi, self.gbi) psl_req = PSL_REQ(did, (0, 9, 18)[self.brs], self.lri) atr_res = psl_res = None self.target = target if self.target is None and self.acm is True: log.debug("searching active communication mode target at 106A") tg = nfc.clf.RemoteTarget("106A", atr_req=atr_req.encode()) try: self.target = self.clf.sense(tg, iterations=2, interval=0.1) except nfc.clf.UnsupportedTargetError: self._acm = False except nfc.clf.CommunicationError: pass else: if self.target: atr_res = ATR_RES.decode(self.target.atr_res) else: self._acm = None if self.target is None: log.debug("searching passive communication mode target at 106A") target = nfc.clf.RemoteTarget("106A") target = self.clf.sense(target, iterations=2, interval=0.1) if target and target.sel_res and bool(target.sel_res[0] & 0x40): self.target = target if self.target is None and self.brs > 0: log.debug("searching passive communication mode target at 212F") target = nfc.clf.RemoteTarget("212F", sensf_req=b'\0\xFF\xFF\0\0') target = self.clf.sense(target, iterations=2, interval=0.1) if target and target.sensf_res.startswith(b'\1\1\xFE'): atr_req.nfcid3 = target.sensf_res[1:9] + b'ST' self.target = target if self.target and self.target.atr_res is None: try: atr_res = self.send_req_recv_res(atr_req, 1.0) except nfc.clf.CommunicationError: pass if atr_res is None: log.debug("NFC-DEP Attribute Request failed") return None if self.target and atr_res: if self.brs > ('106A', '212F', '424F').index(self.target.brty): try: psl_res = self.send_req_recv_res(psl_req, 0.1) except nfc.clf.CommunicationError: pass if psl_res is None: log.debug("NFC-DEP Parameter Selection failed") return None self.target.brty = ('212F', '424F')[self.brs-1] self.rwt = (4096/13.56E6 * 2**(atr_res.wt if atr_res.wt < 15 else 14)) self.miu = (atr_res.lr-3 - int(self.did is not None) - int(self.nad is not None)) self.gbt = atr_res.gb self.pni = 0 log.info("running as " + str(self)) return self.gbt
Activate DEP communication with a target.
def initialize(self, params, repetition):
    """
    Initialize experiment parameters and default values from configuration file
    """
    self.name = params["name"]
    self.dataDir = params.get("datadir", "data")
    self.seed = params.get("seed", 42) + repetition
    torch.manual_seed(self.seed)
    np.random.seed(self.seed)
    random.seed(self.seed)

    # Training
    self.epochs = params.get("epochs", 1)
    self.batch_size = params.get("batch_size", 16)
    self.batches_in_epoch = params.get("batches_in_epoch", 60000)
    self.first_epoch_batch_size = params.get("first_epoch_batch_size", self.batch_size)
    self.batches_in_first_epoch = params.get("batches_in_first_epoch",
                                             self.batches_in_epoch)

    # Testing
    self.test_batch_size = params.get("test_batch_size", 1000)

    # Optimizer
    self.optimizer_class = eval(params.get("optimizer", "torch.optim.SGD"))
    self.optimizer_params = eval(params.get("optimizer_params", "{}"))
    # Guard against a missing scheduler: eval(None) would raise a TypeError
    lr_scheduler = params.get("lr_scheduler", None)
    self.lr_scheduler_class = eval(lr_scheduler) if lr_scheduler else None
    self.lr_scheduler_params = eval(params.get("lr_scheduler_params", "{}"))
    self.loss_function = eval(params.get("loss_function",
                                         "torch.nn.functional.nll_loss"))

    # CNN parameters; materialize the mapped values as lists so they can be
    # indexed below (a bare map object cannot be subscripted on Python 3)
    c, h, w = map(int, params.get("input_shape", "1_28_28").split("_"))
    self.in_channels = c
    self.out_channels = list(map(int, params.get("out_channels", "30_30").split("_")))
    self.kernel_size = list(map(int, params.get("kernel_size", "5_5").split("_")))
    self.stride = list(map(int, params.get("stride", "1_1").split("_")))
    self.padding = list(map(int, params.get("padding", "0_0").split("_")))

    # Compute flattened CNN output length
    self.maxpool = []
    self.maxpool.append(
        ((w + 2 * self.padding[0] - self.kernel_size[0]) // self.stride[0] + 1) // 2)
    self.maxpool.append(
        ((self.maxpool[0] + 2 * self.padding[1] - self.kernel_size[1]) // self.stride[1] + 1) // 2)

    self.cnn_output_len = [self.maxpool[i] * self.maxpool[i] * self.out_channels[i]
                           for i in range(len(self.maxpool))]

    # Linear parameters
    self.n = params.get("n", 1000)
    self.output_size = params.get("output_size", 10)

    # Sparse parameters
    if "c1_k" in params:
        self.cnn_k = list(map(int, params["c1_k"].split("_")))
    else:
        self.cnn_k = self.cnn_output_len
    self.k = params.get("k", self.n)
    self.k_inference_factor = params.get("k_inference_factor", 1.0)
    self.boost_strength = params.get("boost_strength", 1.0)
    self.boost_strength_factor = params.get("boost_strength_factor", 1.0)
    self.weight_sparsity = params.get("weight_sparsity", 1.0)
    self.weight_sparsity_cnn = params.get("weight_sparsity_cnn", 1.0)
Initialize experiment parameters and default values from configuration file
def water_self_diffusion_coefficient(T=None, units=None, warn=True, err_mult=None):
    """ Temperature-dependent self-diffusion coefficient of water.

    Parameters
    ----------
    T : float
        Temperature (default: in Kelvin)
    units : object (optional)
        object with attributes: Kelvin, meter, second
    warn : bool (default: True)
        Emit UserWarning when outside temperature range.
    err_mult : length 2 array_like (default: None)
        Perturb parameters D0 and TS with err_mult[0]*dD0 and err_mult[1]*dTS
        respectively, where dD0 and dTS are the reported uncertainties in the
        fitted parameters. Useful for estimating error in the diffusion
        coefficient.

    References
    ----------
    Temperature-dependent self-diffusion coefficients of water and six selected
        molecular liquids for calibration in accurate 1H NMR PFG measurements
        Manfred Holz, Stefan R. Heil, Antonio Sacco;
        Phys. Chem. Chem. Phys., 2000, 2, 4740-4742
        http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h
        DOI: 10.1039/B005319H
    """
    if units is None:
        K = 1
        m = 1
        s = 1
    else:
        K = units.Kelvin
        m = units.meter
        s = units.second
    if T is None:
        T = 298.15*K
    _D0 = D0 * m**2 * s**-1
    _TS = TS * K
    if err_mult is not None:
        _dD0 = dD0 * m**2 * s**-1
        _dTS = dTS * K
        _D0 += err_mult[0]*_dD0
        _TS += err_mult[1]*_dTS
    if warn and (_any(T < low_t_bound*K) or _any(T > high_t_bound*K)):
        warnings.warn("Temperature is outside range (0-100 degC)")
    return _D0*((T/_TS) - 1)**gamma
Temperature-dependent self-diffusion coefficient of water. Parameters ---------- T : float Temperature (default: in Kelvin) units : object (optional) object with attributes: Kelvin, meter, second warn : bool (default: True) Emit UserWarning when outside temperature range. err_mult : length 2 array_like (default: None) Perturb parameters D0 and TS with err_mult[0]*dD0 and err_mult[1]*dTS respectively, where dD0 and dTS are the reported uncertainties in the fitted parameters. Useful for estimating error in the diffusion coefficient. References ---------- Temperature-dependent self-diffusion coefficients of water and six selected molecular liquids for calibration in accurate 1H NMR PFG measurements Manfred Holz, Stefan R. Heil, Antonio Sacco; Phys. Chem. Chem. Phys., 2000, 2, 4740-4742 http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h DOI: 10.1039/B005319H
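Written out, the fit evaluated by this function is the power law from Holz et al. (2000); the constants quoted here are the values reported in that paper (cited from memory, so treat them as approximate rather than authoritative):

D(T) = D_0 \left( \frac{T}{T_S} - 1 \right)^{\gamma},
\qquad D_0 \approx 1.635 \times 10^{-8}\ \mathrm{m^2\,s^{-1}},
\quad T_S \approx 215.05\ \mathrm{K},
\quad \gamma \approx 2.063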
def map_names(lang="en"):
    """This resource returns a dictionary of the localized map names for
    the specified language. Only maps with events are listed - if you need a
    list of all maps, use ``maps.json`` instead.

    :param lang: The language to query the names for.
    :return: the response is a dictionary where the key is the map id and
             the value is the name of the map in the specified language.

    """
    cache_name = "map_names.%s.json" % lang
    data = get_cached("map_names.json", cache_name, params=dict(lang=lang))
    return dict([(item["id"], item["name"]) for item in data])
This resource returns a dictionary of the localized map names for the specified language. Only maps with events are listed - if you need a list of all maps, use ``maps.json`` instead. :param lang: The language to query the names for. :return: the response is a dictionary where the key is the map id and the value is the name of the map in the specified language.
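An equivalent one-off request without the caching layer; the endpoint URL below follows the public Guild Wars 2 v1 API convention and should be treated as an assumption here, not something taken from this module.

import requests

def fetch_map_names(lang="en"):
    resp = requests.get("https://api.guildwars2.com/v1/map_names.json",
                        params={"lang": lang}, timeout=10)
    resp.raise_for_status()
    return {item["id"]: item["name"] for item in resp.json()}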
def get_data(self, environment_title_or_num=-1, frequency=None): """ Parameters ---------- environment_title_or_num frequency: 'str', default None 'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period' If None, will look for the smallest frequency of environment. """ # manage environment num if isinstance(environment_title_or_num, int): environment_title = tuple(self._raw_environments.keys())[environment_title_or_num] else: environment_title = environment_title_or_num if environment_title not in self._dfs: raise ValueError(f"No environment named {environment_title}. Available environments: {tuple(self._dfs)}.") # get environment dataframes environment_dfs = self._dfs[environment_title] # find first non null frequency if not given if frequency is None: for frequency in FREQUENCIES: if environment_dfs[frequency] is not None: break # check frequency if frequency not in FREQUENCIES: raise ValueError(f"Unknown frequency: {frequency}. Available frequencies: {FREQUENCIES}") return self._dfs[environment_title][frequency]
Parameters ---------- environment_title_or_num frequency: 'str', default None 'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period' If None, will look for the smallest frequency of environment.
def log_likelihood(self): """ Notice we add the jacobian of the warping function here. """ ll = GP.log_likelihood(self) jacobian = self.warping_function.fgrad_y(self.Y_untransformed) return ll + np.log(jacobian).sum()
Notice we add the jacobian of the warping function here.
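The extra term is the usual change-of-variables correction for a warped GP: with z = f(y) the warped targets, the marginal likelihood of the observed y picks up the Jacobian of the warping,

\log p(y \mid X) = \log p\big(f(y) \mid X\big) + \sum_{i=1}^{N} \log \left| \frac{\partial f}{\partial y} \right|_{y = y_i}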
def step_random_processes(oscillators): """ Args: oscillators (list): A list of oscillator.Oscillator objects to operate on Returns: None """ if not rand.prob_bool(0.01): return amp_bias_weights = [(0.001, 1), (0.1, 100), (0.15, 40), (1, 0)] # Find out how many oscillators should move num_moves = iching.get_hexagram('NAIVE') % len(oscillators) for i in range(num_moves): pair = [gram % len(oscillators) for gram in iching.get_hexagram('THREE COIN')] amplitudes = [(gram / 64) * rand.weighted_rand(amp_bias_weights) for gram in iching.get_hexagram('THREE COIN')] oscillators[pair[0]].amplitude.drift_target = amplitudes[0] oscillators[pair[1]].amplitude.drift_target = amplitudes[1]
Args: oscillators (list): A list of oscillator.Oscillator objects to operate on Returns: None
def fit(self, x, y=None, batch_size=32, nb_epoch=10, validation_data=None, distributed=True): """ Train a model for a fixed number of epochs on a dataset. # Arguments x: Input data. A Numpy array or RDD of Sample or Image DataSet. y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet. batch_size: Number of samples per gradient update. nb_epoch: Number of iterations to train. validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays. Or RDD of Sample. Default is None if no validation is involved. distributed: Boolean. Whether to train the model in distributed mode or local mode. Default is True. In local mode, x and y must both be Numpy arrays. """ if distributed: if isinstance(x, np.ndarray) and isinstance(y, np.ndarray): training_data = to_sample_rdd(x, y) if validation_data: validation_data = to_sample_rdd(*validation_data) elif (isinstance(x, RDD) or isinstance(x, DataSet)) and not y: training_data = x else: raise TypeError("Unsupported training data type: %s" % type(x)) callBigDlFunc(self.bigdl_type, "fit", self.value, training_data, batch_size, nb_epoch, validation_data) else: if validation_data: val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])] val_y = JTensor.from_ndarray(validation_data[1]) else: val_x, val_y = None, None callBigDlFunc(self.bigdl_type, "fit", self.value, [JTensor.from_ndarray(x) for x in to_list(x)], JTensor.from_ndarray(y), batch_size, nb_epoch, val_x, val_y, multiprocessing.cpu_count())
Train a model for a fixed number of epochs on a dataset. # Arguments x: Input data. A Numpy array or RDD of Sample or Image DataSet. y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet. batch_size: Number of samples per gradient update. nb_epoch: Number of iterations to train. validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays. Or RDD of Sample. Default is None if no validation is involved. distributed: Boolean. Whether to train the model in distributed mode or local mode. Default is True. In local mode, x and y must both be Numpy arrays.
def get(self, key, default=None, as_int=False, setter=None): """Gets a value from the cache. :param str|unicode key: The cache key to get value for. :param default: Value to return if none found in cache. :param bool as_int: Return 64bit number instead of str. :param callable setter: Setter callable to automatically set cache value if not already cached. Required to accept a key and return a value that will be cached. :rtype: str|unicode|int """ if as_int: val = uwsgi.cache_num(key, self.name) else: val = decode(uwsgi.cache_get(key, self.name)) if val is None: if setter is None: return default val = setter(key) if val is None: return default self.set(key, val) return val
Gets a value from the cache. :param str|unicode key: The cache key to get value for. :param default: Value to return if none found in cache. :param bool as_int: Return 64bit number instead of str. :param callable setter: Setter callable to automatically set cache value if not already cached. Required to accept a key and return a value that will be cached. :rtype: str|unicode|int
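The setter argument turns this into a read-through cache; here is the same pattern as a minimal, dictionary-backed sketch that is independent of uWSGI.

class ReadThroughCache:
    def __init__(self):
        self._store = {}

    def get(self, key, default=None, setter=None):
        if key in self._store:
            return self._store[key]
        if setter is None:
            return default
        value = setter(key)           # compute on a cache miss
        if value is None:
            return default
        self._store[key] = value      # populate for the next caller
        return value

cache = ReadThroughCache()
assert cache.get("user:42", setter=lambda key: "hello") == "hello"  # computed, then cached
assert cache.get("user:42") == "hello"                              # served from the cache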
def delete_state_definition(self, process_id, wit_ref_name, state_id): """DeleteStateDefinition. [Preview API] Removes a state definition in the work item type of the process. :param str process_id: ID of the process :param str wit_ref_name: The reference name of the work item type :param str state_id: ID of the state """ route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if state_id is not None: route_values['stateId'] = self._serialize.url('state_id', state_id, 'str') self._send(http_method='DELETE', location_id='31015d57-2dff-4a46-adb3-2fb4ee3dcec9', version='5.0-preview.1', route_values=route_values)
DeleteStateDefinition. [Preview API] Removes a state definition in the work item type of the process. :param str process_id: ID of the process :param str wit_ref_name: The reference name of the work item type :param str state_id: ID of the state
def single_page_members(self, page_number=1):
        """Get the group member information shown on a single page

        :param page_number: page number
        :return: a list of dicts with the group members' information

        Example return value:
        ::

            [{
                'id': 123,                  # member_id
                'username': 'jim',          # username
                'nickname': 'Jim',          # display name
                'role': u'小组长',           # role within the group
                'points': 1234,             # contributed growth points
                'days': 100,                # days as a member
                'rate': 99.9,               # check-in rate (percent)
                'checked_yesterday': True,  # checked in yesterday?
                'checked': False,           # checked in today?
            }, {
                # ...
            }]
        """
        url = '%s?page=%s' % (self.dismiss_url, page_number)
        html = self.request(url).text
        soup = BeautifulSoup(html)
        members_html = soup.find(id='members')
        if not members_html:
            return []

        def get_tag_string(html, class_, tag='td', n=0):
            """Get the text of a single tag"""
            return html.find_all(tag, class_=class_)[n].get_text().strip()

        members = []
        # collect the information of each member
        for member_html in members_html.find_all('tr', class_='member'):
            _id = member_html.attrs['data-id']
            try:
                user_url = member_html.find_all('td', class_='user'
                                                )[0].find('a').attrs['href']
                username = self.get_username('http://www.shanbay.com'
                                             + user_url)
            except Exception as e:
                logger.exception(e)
                username = ''
            try:
                nickname = get_tag_string(member_html, 'nickname', 'a')
            except Exception as e:
                logger.exception(e)
                nickname = username
            try:
                role = member_html.find_all('td', class_='user'
                                            )[0].find_all('span', class_='label'
                                                          )[0].get_text().strip()
            except IndexError:
                role = ''
            except Exception as e:
                logger.exception(e)
                role = ''

            member = {
                'id': int(_id),
                'username': username,
                # display name
                'nickname': nickname,
                # role within the group
                'role': role,
                # contributed growth points
                'points': int(get_tag_string(member_html, 'points')),
                # days as a member
                'days': int(get_tag_string(member_html, 'days')),
                # check-in rate (percent)
                'rate': float(get_tag_string(member_html, 'rate'
                                             ).split('%')[0]),
                # checked in yesterday? ('未打卡' means "not checked in")
                'checked_yesterday': get_tag_string(member_html, 'checked'
                                                    ) != '未打卡',
                # checked in today?
                'checked': get_tag_string(member_html, 'checked',
                                          n=1) != '未打卡',
            }
            members.append(member)
        return members
Get the group member information shown on a single page :param page_number: page number :return: a list of dicts with the group members' information Example return value: :: [{ 'id': 123, # member_id 'username': 'jim', # username 'nickname': 'Jim', # display name 'role': u'小组长', # role within the group 'points': 1234, # contributed growth points 'days': 100, # days as a member 'rate': 99.9, # check-in rate (percent) 'checked_yesterday': True, # checked in yesterday? 'checked': False, # checked in today? }, { # ... }]
def show_fabric_trunk_info_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info input = ET.SubElement(show_fabric_trunk_info, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def vertices(self): """Return an array (Nf, 3) of vertices. If only faces exist, the function computes the vertices and returns them. If no vertices or faces are specified, the function returns None. """ if self._faces is None: if self._vertices is None: return None self.triangulate() return self._vertices
Return an array (Nf, 3) of vertices. If only faces exist, the function computes the vertices and returns them. If no vertices or faces are specified, the function returns None.
def _get_event_and_context(self, event, arg_type): """Return an INDRA Event based on an event entry.""" eid = _choose_id(event, arg_type) ev = self.concept_dict[eid] concept, metadata = self._make_concept(ev) ev_delta = {'adjectives': [], 'states': get_states(ev), 'polarity': get_polarity(ev)} context = self._make_context(ev) event_obj = Event(concept, delta=ev_delta, context=context) return event_obj
Return an INDRA Event based on an event entry.
def _format_params(self, type_, params): """Reformat some of the parameters for sapi.""" if 'initial_state' in params: # NB: at this moment the error raised when initial_state does not match lin/quad (in # active qubits) is not very informative, but there is also no clean way to check here # that they match because lin can be either a list or a dict. In the future it would be # good to check. initial_state = params['initial_state'] if isinstance(initial_state, Mapping): initial_state_list = [3]*self.properties['num_qubits'] low = -1 if type_ == 'ising' else 0 for v, val in initial_state.items(): if val == 3: continue if val <= 0: initial_state_list[v] = low else: initial_state_list[v] = 1 params['initial_state'] = initial_state_list
Reformat some of the parameters for sapi.
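What the dict-to-list conversion does in isolation: unspecified qubits keep the marker value 3, and supplied values are mapped to {low, 1} depending on the problem type (the 6-qubit size below is just an example).

def initial_state_to_list(initial_state, num_qubits, problem_type="ising"):
    low = -1 if problem_type == "ising" else 0
    out = [3] * num_qubits                # 3 marks "no initial value"
    for qubit, val in initial_state.items():
        if val == 3:
            continue
        out[qubit] = low if val <= 0 else 1
    return out

assert initial_state_to_list({0: 1, 2: -1}, 6) == [1, 3, -1, 3, 3, 3]
assert initial_state_to_list({0: 1, 2: 0}, 6, "qubo") == [1, 3, 0, 3, 3, 3]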
def version(self) -> Optional[str]:
        """ Get the HTTP version """
        if self._version is None:
            self._version = self._parser.get_http_version()
        return self._version
Get the HTTP version
def get_biome_color_based_on_elevation(world, elev, x, y, rng): ''' This is the "business logic" for determining the base biome color in satellite view. This includes generating some "noise" at each spot in a pixel's rgb value, potentially modifying the noise based on elevation, and finally incorporating this with the base biome color. The basic rules regarding noise generation are: - Oceans have no noise added - land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value - land tiles with high elevations further modify the noise by set amounts (to drain some of the color and make the map look more like mountains) The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough. Finally, the noise plus the biome color are added and returned. rng refers to an instance of a random number generator used to draw the random samples needed by this function. ''' v = world.biome_at((x, y)).name() biome_color = _biome_satellite_colors[v] # Default is no noise - will be overwritten if this tile is land noise = (0, 0, 0) if world.is_land((x, y)): ## Generate some random noise to apply to this pixel # There is noise for each element of the rgb value # This noise will be further modified by the height of this tile noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3) # draw three random numbers at once ####### Case 1 - elevation is very high ######## if elev > HIGH_MOUNTAIN_ELEV: # Modify the noise to make the area slightly brighter to simulate snow-topped mountains. noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER) # Average the biome's color with the MOUNTAIN_COLOR to tint the terrain biome_color = average_colors(biome_color, MOUNTAIN_COLOR) ####### Case 2 - elevation is high ######## elif elev > MOUNTAIN_ELEV: # Modify the noise to make this tile slightly darker, especially draining the green noise = add_colors(noise, MOUNTAIN_NOISE_MODIFIER) # Average the biome's color with the MOUNTAIN_COLOR to tint the terrain biome_color = average_colors(biome_color, MOUNTAIN_COLOR) ####### Case 3 - elevation is somewhat high ######## elif elev > HIGH_HILL_ELEV: noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER) ####### Case 4 - elevation is a little bit high ######## elif elev > HILL_ELEV: noise = add_colors(noise, HILL_NOISE_MODIFIER) # There is also a minor base modifier to the pixel's rgb value based on height modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER) base_elevation_modifier = (modification_amount, modification_amount, modification_amount) this_tile_color = add_colors(biome_color, noise, base_elevation_modifier) return this_tile_color
This is the "business logic" for determining the base biome color in satellite view. This includes generating some "noise" at each spot in a pixel's rgb value, potentially modifying the noise based on elevation, and finally incorporating this with the base biome color. The basic rules regarding noise generation are: - Oceans have no noise added - land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value - land tiles with high elevations further modify the noise by set amounts (to drain some of the color and make the map look more like mountains) The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough. Finally, the noise plus the biome color are added and returned. rng refers to an instance of a random number generator used to draw the random samples needed by this function.
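The colour helpers used above are not part of this excerpt; here is a plausible minimal sketch, assuming colours are 3-tuples (or length-3 arrays) of channel values.

def add_colors(*colors):
    # Element-wise sum of any number of rgb triples, clamped to 0-255.
    summed = (sum(channel) for channel in zip(*colors))
    return tuple(max(0, min(255, int(c))) for c in summed)

def average_colors(color_a, color_b):
    # Simple 50/50 blend of two rgb triples.
    return tuple((a + b) // 2 for a, b in zip(color_a, color_b))

# add_colors((100, 120, 90), (5, -10, 0), (2, 2, 2))  ->  (107, 112, 92)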
def anonymous_login(self): """Login as anonymous user :return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_ :rtype: :class:`.EResult` """ self._LOG.debug("Attempting Anonymous login") self._pre_login() self.username = None self.login_key = None message = MsgProto(EMsg.ClientLogon) message.header.steamid = SteamID(type='AnonUser', universe='Public') message.body.protocol_version = 65579 self.send(message) resp = self.wait_msg(EMsg.ClientLogOnResponse, timeout=30) return EResult(resp.body.eresult) if resp else EResult.Fail
Login as anonymous user :return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_ :rtype: :class:`.EResult`
def _draw_circle(self, pos_x, pos_y, radius, depth, stroke_width=1., fill_color=None, border_color=None, from_angle=0., to_angle=2 * pi): """Draws a circle Draws a circle with a line segment a desired position with desired size. :param float pos_x: Center x position :param float pos_y: Center y position :param float depth: The Z layer :param float radius: Radius of the circle """ visible = False # Check whether circle center is in the viewport if not self.point_outside_view((pos_x, pos_y)): visible = True # Check whether at least on point on the border of the circle is within the viewport if not visible: for i in range(0, 8): angle = 2 * pi / 8. * i x = pos_x + cos(angle) * radius y = pos_y + sin(angle) * radius if not self.point_outside_view((x, y)): visible = True break if not visible: return False angle_sum = to_angle - from_angle if angle_sum < 0: angle_sum = float(to_angle + 2 * pi - from_angle) segments = self.pixel_to_size_ratio() * radius * 1.5 segments = max(4, segments) segments = int(round(segments * angle_sum / (2. * pi))) types = [] if fill_color is not None: types.append(GL_POLYGON) if border_color is not None: types.append(GL_LINE_LOOP) for type in types: if type == GL_POLYGON: fill_color.set() else: self._set_closest_stroke_width(stroke_width) border_color.set() glBegin(type) angle = from_angle for i in range(0, segments): x = pos_x + cos(angle) * radius y = pos_y + sin(angle) * radius glVertex3f(x, y, depth) angle += angle_sum / (segments - 1) if angle > 2 * pi: angle -= 2 * pi if i == segments - 2: angle = to_angle glEnd() return True
Draws a circle Draws a circle with a line segment a desired position with desired size. :param float pos_x: Center x position :param float pos_y: Center y position :param float depth: The Z layer :param float radius: Radius of the circle
def delete_message(self, messageid="", folderid="", stackid=""):
        """Delete a message or a message stack

        :param folderid: The folder to delete the message from, defaults to inbox
        :param messageid: The message to delete
        :param stackid: The stack to delete
        """

        if self.standard_grant_type != "authorization_code":
            raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")

        response = self._req('/messages/delete', post_data={
            'folderid' : folderid,
            'messageid' : messageid,
            'stackid' : stackid
        })

        return response
Delete a message or a message stack :param folderid: The folder to delete the message from, defaults to inbox :param messageid: The message to delete :param stackid: The stack to delete
def _GetStringValue(self, data_dict, name, default_value=None):
    """Retrieves a specific string value from the data dict.

    Args:
      data_dict (dict[str, list[str]]): values per name.
      name (str): name of the value to retrieve.
      default_value (Optional[object]): value to return if the name has no value
          set in data_dict.

    Returns:
      str: value represented as a string.
    """
    values = data_dict.get(name, None)
    if not values:
      return default_value

    for index, value in enumerate(values):
      if ',' in value:
        values[index] = '"{0:s}"'.format(value)

    return ', '.join(values)
Retrieves a specific string value from the data dict. Args: data_dict (dict[str, list[str]]): values per name. name (str): name of the value to retrieve. default_value (Optional[object]): value to return if the name has no value set in data_dict. Returns: str: value represented as a string.
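The quoting rule in isolation: any value containing a comma is wrapped in double quotes before joining, so the joined string stays unambiguous.

def join_values(values):
    quoted = ['"{0:s}"'.format(value) if ',' in value else value for value in values]
    return ', '.join(quoted)

assert join_values(['red', 'green']) == 'red, green'
assert join_values(['Doe, Jane', 'Smith']) == '"Doe, Jane", Smith'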
def parse_orgtable(lines):
    """
    Parse an org-table (input as a list of strings split by newline)
    into a Pandas data frame.

    Parameters
    ----------
    lines : list of str
        an org-table input as a list of strings split by newline

    Returns
    -------
    dataframe : pandas.DataFrame
        A data frame containing the org-table's data
    """
    def parseline(l):
        w = l.split('|')[1:-1]
        return [wi.strip() for wi in w]
    columns = parseline(lines[0])

    data = []
    for line in lines[2:]:
        # materialize each row as a list so pandas receives concrete values
        data.append(list(map(str, parseline(line))))
    dataframe = _pd.DataFrame(data=data, columns=columns)
    # set_index returns a new frame, so keep the result
    dataframe = dataframe.set_index("RunNo")
    return dataframe
Parse an org-table (input as a list of strings split by newline) into a Pandas data frame. Parameters ---------- lines : list of str an org-table input as a list of strings split by newline Returns ------- dataframe : pandas.DataFrame A data frame containing the org-table's data
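A usage sketch with a small, made-up table; note the function expects the second line to be the separator row and, as written, assumes a RunNo column to index by.

lines = [
    "| RunNo | Current | Voltage |",
    "|-------+---------+---------|",
    "| 1     | 0.5     | 1.2     |",
    "| 2     | 0.7     | 1.3     |",
]
df = parse_orgtable(lines)
# df is indexed by RunNo with columns ['Current', 'Voltage'];
# all values are strings unless cast afterwards.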
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
    """
    Convert a list of strings to a list of integers.

    :param strings: a list of strings
    :return: a list of converted integers

    .. doctest::

        >>> strings_to_integers(['1', '1.0', '-0.2'])
        [1, 1, 0]
    """
    return strings_to_(strings, lambda x: int(float(x)))
Convert a list of strings to a list of integers. :param strings: a list of strings :return: a list of converted integers .. doctest:: >>> strings_to_integers(['1', '1.0', '-0.2']) [1, 1, 0]
def multipoint(self, points): """Creates a MULTIPOINT shape. Points is a list of xy values.""" shapeType = MULTIPOINT points = [points] # nest the points inside a list to be compatible with the generic shapeparts method self._shapeparts(parts=points, shapeType=shapeType)
Creates a MULTIPOINT shape. Points is a list of xy values.
def is_step_visible(self, step): """ Returns whether the given `step` should be included in the wizard; it is included if either the form is idempotent or not filled in before. """ return self.idempotent_dict.get(step, True) or \ step not in self.storage.validated_step_data
Returns whether the given `step` should be included in the wizard; it is included if either the form is idempotent or not filled in before.