def remaining_quota(self, remaining_quota):
    if remaining_quota is None:
        raise ValueError("Invalid value for `remaining_quota`, must not be `None`")
    if remaining_quota is not None and remaining_quota < 0:
        raise ValueError("Invalid value for `remaining_quota`, must be a value greater than or equal to `0`")
    self._remaining_quota = remaining_quota
Sets the remaining_quota of this ServicePackageMetadata.

Current available service package quota.

:param remaining_quota: The remaining_quota of this ServicePackageMetadata.
:type: int
def get_resource_solvers(self, resource):
    solvers_classes = [s for s in self.resource_solver_classes if s.can_solve(resource)]
    if solvers_classes:
        solvers = []
        for solver_class in solvers_classes:
            if solver_class not in self._resource_solvers_cache:
                self._resource_solvers_cache[solver_class] = solver_class(self)
            solvers.append(self._resource_solvers_cache[solver_class])
        return solvers
    raise SolverNotFound(self, resource)
Returns the resource solvers that can solve the given resource.

Arguments
---------
resource : dataql.resources.Resource
    An instance of a subclass of ``Resource`` for which we want to get
    the solver classes that can solve it.

Returns
-------
list
    The list of resource solvers instances that can solve the given resource.

Raises
------
dataql.solvers.exceptions.SolverNotFound
    When no solver is able to solve the given resource.

Example
-------

>>> from dataql.resources import Field, List
>>> registry = Registry()
>>> registry.get_resource_solvers(Field(name='foo'))
[<AttributeSolver>]
>>> registry.get_resource_solvers(List(name='foo'))
[<ListSolver>]
>>> registry.get_resource_solvers(None)  # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
def calc_Cmin(mh, mc, Cph, Cpc):
    Ch = mh*Cph
    Cc = mc*Cpc
    return min(Ch, Cc)
Returns the heat capacity rate for the minimum stream having flows `mh`
and `mc`, with averaged heat capacities `Cph` and `Cpc`.

.. math::
    C_c = m_c C_{p,c}

    C_h = m_h C_{p,h}

    C_{min} = \min(C_c, C_h)

Parameters
----------
mh : float
    Mass flow rate of hot stream, [kg/s]
mc : float
    Mass flow rate of cold stream, [kg/s]
Cph : float
    Averaged heat capacity of hot stream, [J/kg/K]
Cpc : float
    Averaged heat capacity of cold stream, [J/kg/K]

Returns
-------
Cmin : float
    The heat capacity rate of the smaller fluid, [W/K]

Notes
-----
Used with the effectiveness method for heat exchanger design.
Technically, it doesn't matter if the hot and cold streams are in the
right order for the input, but it is easiest to use this function when
the order is specified.

Examples
--------
>>> calc_Cmin(mh=22., mc=5.5, Cph=2200, Cpc=4400.)
24200.0

References
----------
.. [1] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
   David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
   Wiley, 2011.
# The decorator is required for use in a with-statement; it is assumed to
# come from "from contextlib import contextmanager".
@contextmanager
def bucket_ops(bid, api=""):
    try:
        yield 42
    except ClientError as e:
        code = e.response['Error']['Code']
        log.info("bucket error bucket:%s error:%s", bid,
                 e.response['Error']['Code'])
        if code == "NoSuchBucket":
            pass
        elif code == 'AccessDenied':
            connection.sadd('buckets-denied', bid)
        else:
            connection.hset(
                'buckets-unknown-errors',
                bid,
                "%s:%s" % (api, e.response['Error']['Code']))
    except Exception as e:
        connection.hset(
            'buckets-unknown-errors',
            bid,
            "%s:%s" % (api, str(e)))
        raise
Context manager for dealing with S3 errors in one place.

bid: bucket_id in form of account_name:bucket_name
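A hedged usage sketch (the bucket id and API name are illustrative; the redis `connection`, `log`, and the boto3 `client` are assumed to be wired up by the surrounding module):

with bucket_ops('my-account:my-bucket', api='GetBucketLocation'):
    # any ClientError raised here is recorded per the handlers above
    client.get_bucket_location(Bucket='my-bucket')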
def renew_secret(client, creds, opt):
    if opt.reuse_token:
        return

    seconds = grok_seconds(opt.lease)
    if not seconds:
        raise aomi.exceptions.AomiCommand("invalid lease %s" % opt.lease)

    renew = None
    if client.version:
        v_bits = client.version.split('.')
        if int(v_bits[0]) == 0 and \
           int(v_bits[1]) <= 8 and \
           int(v_bits[2]) <= 0:
            r_obj = {'increment': seconds}
            r_path = "v1/sys/renew/{0}".format(creds['lease_id'])
            renew = client._post(r_path, json=r_obj).json()

    if not renew:
        renew = client.renew_secret(creds['lease_id'], seconds)

    if not renew or (seconds - renew['lease_duration'] >= 5):
        client.revoke_self_token()
        e_msg = 'Unable to renew with desired lease'
        raise aomi.exceptions.VaultConstraint(e_msg)
Renews a secret. This will occur unless the user has specified on the command line that it is not necessary.
def colors_to_materials(colors, count=None):
    rgba = to_rgba(colors)
    if util.is_shape(rgba, (4,)) and count is not None:
        diffuse = rgba.reshape((-1, 4))
        # note: np.int was removed in NumPy 1.24; the builtin int gives the
        # same platform-default integer dtype
        index = np.zeros(count, dtype=int)
    elif util.is_shape(rgba, (-1, 4)):
        unique, index = grouping.unique_rows(rgba)
        diffuse = rgba[unique]
    else:
        raise ValueError('Colors not convertible!')
    return diffuse, index
Convert a list of colors into a list of unique materials
and material indexes.

Parameters
-----------
colors : (n, 3) or (n, 4) float
    RGB or RGBA colors
count : int
    Number of entities to apply color to

Returns
-----------
diffuse : (m, 4) int
    Colors
index : (count,) int
    Index of each color
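A hedged sketch of the deduplication step in plain numpy (trimesh's grouping.unique_rows is assumed to behave like np.unique over rows, up to row ordering):

import numpy as np

rgba = np.array([[255, 0, 0, 255],
                 [0, 255, 0, 255],
                 [255, 0, 0, 255]], dtype=np.uint8)
diffuse, index = np.unique(rgba, axis=0, return_inverse=True)
# diffuse -> the 2 distinct colors; index -> [1, 0, 1], mapping each input
# row to its material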
def label(self, name, value, cluster_ids=None):
    if cluster_ids is None:
        cluster_ids = self.cluster_view.selected
    if not hasattr(cluster_ids, '__len__'):
        cluster_ids = [cluster_ids]
    if len(cluster_ids) == 0:
        return
    self.cluster_meta.set(name, cluster_ids, value)
    self._global_history.action(self.cluster_meta)
Assign a label to clusters. Example: `quality 3`
def create_virtualenv(venv_dir, use_venv_module=True):
    if not use_venv_module:
        try:
            check_call(['virtualenv', venv_dir, '--no-site-packages'])
        except OSError:
            raise Exception("You probably don't have virtualenv installed: "
                            "sudo apt-get install python-virtualenv")
    else:
        check_call([sys.executable or 'python', '-m', 'venv', venv_dir])
Creates a new virtualenv in venv_dir.

By default, the built-in venv module is used. On older versions of
Python, you may set use_venv_module to False to use virtualenv.
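A hedged usage sketch (paths are illustrative):

create_virtualenv('/tmp/demo-venv')                         # uses the built-in venv module
create_virtualenv('/tmp/demo-venv', use_venv_module=False)  # shells out to virtualenv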
def to_parfiles(self, prefix):
    if self.isnull().values.any():
        warnings.warn("NaN in par ensemble", PyemuWarning)
    if self.istransformed:
        self._back_transform(inplace=True)
    par_df = self.pst.parameter_data.loc[:, ["parnme", "parval1", "scale", "offset"]].copy()
    for real in self.index:
        par_file = "{0}{1}.par".format(prefix, real)
        par_df.loc[:, "parval1"] = self.loc[real, :]
        write_parfile(par_df, par_file)
write the parameter ensemble to PEST-style parameter files

Parameters
----------
prefix: str
    file prefix for par files

Note
----
this function back-transforms inplace with respect to
log10 before writing
def print_long(filename, stat, print_func):
    size = stat_size(stat)
    mtime = stat_mtime(stat)
    file_mtime = time.localtime(mtime)
    curr_time = time.time()
    if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS):
        print_func('%6d %s %2d %04d %s' % (
            size, MONTH[file_mtime[1]], file_mtime[2], file_mtime[0],
            decorated_filename(filename, stat)))
    else:
        print_func('%6d %s %2d %02d:%02d %s' % (
            size, MONTH[file_mtime[1]], file_mtime[2], file_mtime[3],
            file_mtime[4], decorated_filename(filename, stat)))
Prints detailed information about the file passed in.
def and_raises(self, *errors):
    "Expects one or more errors to be raised from the given expectation."
    for error in errors:
        self.__expect(Expectation.raises, error)
Expects one or more errors to be raised from the given expectation.
def set_join_rule(self, room_id, join_rule):
    content = {
        "join_rule": join_rule
    }
    return self.send_state_event(room_id, "m.room.join_rules", content)
Set the rule for users wishing to join the room.

Args:
    room_id(str): The room to set the rules for.
    join_rule(str): The chosen rule. One of: ["public", "knock",
        "invite", "private"]
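A hedged usage sketch (the room id is made up; the rule value is one of those listed above):

client.set_join_rule('!abc123:example.org', 'invite')  # make the room invite-only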
def _correctIsotopeImpurities(matrix, intensities):
    correctedIntensities, _ = scipy.optimize.nnls(matrix, intensities)
    return correctedIntensities
Corrects observed reporter ion intensities for isotope impurities.

:param matrix: a matrix (2d nested list) containing numbers, each isobaric
    channel must be present as a COLUMN. Use maspy.isobar._transposeMatrix()
    if channels are written in rows.
:param intensities: numpy array of observed reporter ion intensities.

:returns: a numpy array of reporter ion intensities corrected for isotope
    impurities.
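A small self-contained sketch of the same correction, with a hypothetical 2-channel impurity matrix (only scipy.optimize.nnls is assumed):

import numpy as np
import scipy.optimize

# Each COLUMN is one isobaric channel; off-diagonal entries are impurities.
impurity_matrix = np.array([[0.95, 0.03],
                            [0.05, 0.97]])
# Observed intensities produced by true intensities (1000, 2000):
observed = np.array([1010., 1990.])
corrected, residual = scipy.optimize.nnls(impurity_matrix, observed)
# corrected ~ [1000., 2000.]: the non-negative solution explaining "observed"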
def _get_stddevs(self, C, stddev_types, mag, num_sites):
    stddevs = []
    for _ in stddev_types:
        if mag < 7.16:
            sigma = C['c11'] + C['c12'] * mag
        else:  # mag >= 7.16
            sigma = C['c13']
        stddevs.append(np.zeros(num_sites) + sigma)
    return stddevs
Return total standard deviation as for equation 35, page 1021.
def add_device_callback(self, devices, callback):
    if not devices:
        return False

    if not isinstance(devices, (tuple, list)):
        devices = [devices]

    for device in devices:
        # The caller may pass either a device id or an AbodeDevice.
        device_id = device
        if isinstance(device, AbodeDevice):
            device_id = device.device_id

        if not self._abode.get_device(device_id):
            raise AbodeException(ERROR.EVENT_DEVICE_INVALID)

        _LOGGER.debug("Subscribing to updates for device_id: %s", device_id)

        self._device_callbacks[device_id].append(callback)

    return True
Register a device callback.
def putnotify(self, name, *args):
    self.queues[name][0].put(*args)
    self.queues[name][1].set()
Puts data into queue and alerts listeners
def get_user(self, name):
    users = self.data['users']
    for user in users:
        if user['name'] == name:
            return user
    raise KubeConfError("user name not found.")
Get user from kubeconfig.
def alterar(self, id_script_type, type, description):
    if not is_valid_int_param(id_script_type):
        raise InvalidParameterError(
            u'The identifier of Script Type is invalid or was not informed.')

    script_type_map = dict()
    script_type_map['type'] = type
    script_type_map['description'] = description

    url = 'scripttype/' + str(id_script_type) + '/'

    code, xml = self.submit({'script_type': script_type_map}, 'PUT', url)

    return self.response(code, xml)
Change Script Type by the identifier.

:param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
:param type: Script Type type. String with a minimum 3 and maximum of 40 characters
:param description: Script Type description. String with a minimum 3 and maximum of 100 characters

:return: None

:raise InvalidParameterError: The identifier of Script Type, type or description is null or invalid.
:raise TipoRoteiroNaoExisteError: Script Type not registered.
:raise NomeTipoRoteiroDuplicadoError: A Script Type with the informed type is already registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
def save_performance(db, job_id, records):
    rows = [(job_id, rec['operation'], rec['time_sec'], rec['memory_mb'],
             int(rec['counts'])) for rec in records]
    db.insert('performance',
              'job_id operation time_sec memory_mb counts'.split(), rows)
Save in the database the performance information about the given job.

:param db: a :class:`openquake.server.dbapi.Db` instance
:param job_id: a job ID
:param records: a list of performance records
def map(self, map_fn, desc=None):
    if desc is None:
        desc = getattr(map_fn, '__name__', '')
    desc = u'map({})'.format(desc)
    return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc)
Return a copy of this query, with the values mapped through `map_fn`.

Args:
    map_fn (callable): A callable that takes a single argument and
        returns a new value.

Keyword Args:
    desc (str): A description of the mapping transform, for use in log
        messages. Defaults to the name of the map function.

Returns:
    Query
def pyxwriter(self):
    model = self.Model()
    if hasattr(self, 'Parameters'):
        model.parameters = self.Parameters(vars(self))
    else:
        model.parameters = parametertools.Parameters(vars(self))
    if hasattr(self, 'Sequences'):
        model.sequences = self.Sequences(model=model, **vars(self))
    else:
        model.sequences = sequencetools.Sequences(model=model, **vars(self))
    return PyxWriter(self, model, self.pyxfilepath)
Update the pyx file.
def exponential_backoff(self):
    last_service_switch = self._service_starttime
    if not last_service_switch:
        return

    time_since_last_switch = time.time() - last_service_switch

    if not self._service_rapidstarts:
        self._service_rapidstarts = 0

    minimum_wait = 0.1 * (2 ** self._service_rapidstarts)
    minimum_wait = min(5, minimum_wait)

    if time_since_last_switch > 10:
        self._service_rapidstarts = 0
        return

    self._service_rapidstarts += 1
    self.log.debug("Slowing down service starts (%.1f seconds)", minimum_wait)
    time.sleep(minimum_wait)
A function that keeps waiting longer and longer the more rapidly it is called. It can be used to increasingly slow down service starts when they keep failing.
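For reference, a standalone sketch of the same backoff curve (the 0.1 s base and 5 s cap are taken from the code above):

waits = [min(5, 0.1 * 2 ** n) for n in range(8)]
# -> [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 5.0, 5.0]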
def create(cls, data, id_=None, **kwargs):
    from .models import RecordMetadata
    with db.session.begin_nested():
        record = cls(data)

        before_record_insert.send(
            current_app._get_current_object(),
            record=record
        )

        record.validate(**kwargs)

        record.model = RecordMetadata(id=id_, json=record)
        db.session.add(record.model)

    after_record_insert.send(
        current_app._get_current_object(),
        record=record
    )
    return record
Create a new record instance and store it in the database.

#. Send a signal :data:`invenio_records.signals.before_record_insert`
   with the new record as parameter.
#. Validate the new record data.
#. Add the new record in the database.
#. Send a signal :data:`invenio_records.signals.after_record_insert`
   with the new created record as parameter.

:Keyword Arguments:
  * **format_checker** -- An instance of the class
    :class:`jsonschema.FormatChecker`, which contains validation rules
    for formats. See :func:`~invenio_records.api.RecordBase.validate`
    for more details.
  * **validator** -- A :class:`jsonschema.IValidator` class that will
    be used to validate the record. See
    :func:`~invenio_records.api.RecordBase.validate` for more details.

:param data: Dict with the record metadata.
:param id_: Specify a UUID to use for the new record, instead of
    automatically generated.
:returns: A new :class:`Record` instance.
def authenticate(self, transport, account_name, password):
    if not isinstance(transport, ZimbraClientTransport):
        raise ZimbraClientException('Invalid transport')
    if util.empty(account_name):
        raise AuthException('Empty account name')
Authenticates account, if no password given tries to pre-authenticate.

@param transport: transport to use for method calls
@param account_name: account name
@param password: account password
@return: AuthToken if authentication succeeded
@raise AuthException: if authentication fails
def generate_page(self, path, template, **kwargs):
    directory = None
    if kwargs.get('page'):
        directory = kwargs['page'].dir
    path = self._get_dist_path(path, directory=directory)
    if not path.endswith('.html'):
        path = path + '.html'
    if not os.path.isdir(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    html = self._get_template(template).render(**kwargs)
    with open(path, 'w+') as file:
        file.write(html)
Generate the HTML for a single page.

You usually don't need to call this method manually, it is used by a
lot of other, more end-user friendly methods.

Args:
    path (str): Where to place the page relative to the root URL.
        Usually something like "index", "about-me", "projects/example",
        etc.
    template (str): Which jinja template to use to render the page.
    **kwargs: Kwargs will be passed on to the jinja template. Also, if
        the `page` kwarg is passed, its directory attribute will be
        prepended to the path.
def hist(self, by=None, bins=10, **kwds):
    return self(kind='hist', by=by, bins=bins, **kwds)
Draw one histogram of the DataFrame's columns.

A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.

Parameters
----------
by : str or sequence, optional
    Column in the DataFrame to group by.
bins : int, default 10
    Number of histogram bins to be used.
**kwds
    Additional keyword arguments are documented in
    :meth:`DataFrame.plot`.

Returns
-------
:class:`matplotlib.AxesSubplot`
    Return a histogram plot.

See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.

Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dices and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.

.. plot::
    :context: close-figs

    >>> df = pd.DataFrame(
    ...     np.random.randint(1, 7, 6000),
    ...     columns = ['one'])
    >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
    >>> ax = df.plot.hist(bins=12, alpha=0.5)
def undisplayable_info(obj, html=False):
    "Generate helpful message regarding an undisplayable object"
    collate = '<tt>collate</tt>' if html else 'collate'
    info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
    if isinstance(obj, HoloMap):
        error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
        remedy = "Please call the %s method to generate a displayable object" % collate
    elif isinstance(obj, Layout):
        error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
        remedy = "Please call the %s method on the appropriate elements." % collate
    elif isinstance(obj, GridSpace):
        error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
        remedy = "Please call the %s method on the appropriate elements." % collate

    if not html:
        return '\n'.join([error, remedy, info])
    else:
        return "<center>{msg}</center>".format(msg=('<br>'.join(
            ['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
Generate helpful message regarding an undisplayable object
def lookup_url(self, url):
    if type(url) is not str:
        url = url.encode('utf8')
    if not url.strip():
        raise ValueError("Empty input string.")
    url_hashes = URL(url).hashes
    try:
        list_names = self._lookup_hashes(url_hashes)
        self.storage.commit()
    except Exception:
        self.storage.rollback()
        raise
    if list_names:
        return list_names
    return None
Look up specified URL in Safe Browsing threat lists.
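A hedged usage sketch (`sb` stands in for whatever client class hosts this method; the URL is illustrative):

threat_lists = sb.lookup_url('http://example.com/')
if threat_lists:
    print('URL matched threat lists:', threat_lists)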
def _memory_profile(with_gc=False):
    import utool as ut
    if with_gc:
        garbage_collect()
    import guppy
    hp = guppy.hpy()
    print('[hpy] Waiting for heap output...')
    heap_output = hp.heap()
    print(heap_output)
    print('[hpy] total heap size: ' + ut.byte_str2(heap_output.size))
    ut.util_resources.memstats()
Helper for memory debugging. Mostly just a namespace where I experiment
with guppy and heapy.

References:
    http://stackoverflow.com/questions/2629680/deciding-between-subprocess-multiprocessing-and-thread-in-python

Reset Numpy Memory::

    %reset out
    %reset array
def list_bindings_for_vhost(self, vhost):
    return self._api_get('/api/bindings/{}'.format(
        urllib.parse.quote_plus(vhost)
    ))
A list of all bindings in a given virtual host.

:param vhost: The vhost name
:type vhost: str
def get_field_errors(self, field):
    identifier = format_html('{0}[\'{1}\']', self.form_name, field.name)
    errors = self.errors.get(field.html_name, [])
    return self.error_class([SafeTuple(
        (identifier, self.field_error_css_classes, '$pristine', '$pristine',
         'invalid', e)) for e in errors])
Return server side errors. Shall be overridden by derived forms to add their extra errors for AngularJS.
def forw_dfs(self, start, end=None):
    return list(self.iterdfs(start, end, forward=True))
Returns a list of nodes in some forward DFS order. Starting with the start node the depth first search proceeds along outgoing edges.
def add_device_not_active_callback(self, callback):
    _LOGGER.debug('Added new callback %s ', callback)
    self._cb_device_not_active.append(callback)
Register callback to be invoked when a device is not responding.
def children(self, node, relations=None):
    g = self.get_graph()
    if node in g:
        children = list(g.successors(node))
        if relations is None:
            return children
        else:
            rset = set(relations)
            return [c for c in children
                    if len(self.child_parent_relations(c, node, graph=g).intersection(rset)) > 0]
    else:
        return []
Return all direct children of specified node.

Wraps networkx by default.

Arguments
---------
node: string
    identifier for node in ontology
relations: list of strings
    list of relation (object property) IDs used to filter
def run(self):
    states = open(self.states, 'r').read().splitlines()
    for state in states:
        url = self.build_url(state)
        log = "Downloading State < {0} > from < {1} >"
        logging.info(log.format(state, url))
        tmp = self.download(self.output, url, self.overwrite)
        self.s3.store(self.extract(tmp, self.tmp2poi(tmp)))
For each state in the states file, build the URL and download the file.
def _scalar_operations(self, axis, scalar, func):
    if isinstance(scalar, (list, np.ndarray, pandas.Series)):
        new_index = self.index if axis == 0 else self.columns

        def list_like_op(df):
            if axis == 0:
                df.index = new_index
            else:
                df.columns = new_index
            return func(df)

        new_data = self._map_across_full_axis(
            axis, self._prepare_method(list_like_op)
        )
        return self.__constructor__(new_data, self.index, self.columns)
    else:
        return self._map_partitions(self._prepare_method(func))
Handler for mapping scalar operations across a Manager.

Args:
    axis: The axis index object to execute the function on.
    scalar: The scalar value to map.
    func: The function to use on the Manager with the scalar.

Returns:
    A new QueryCompiler with updated data and new index.
def repo_name(self):
    ds = [[x.repo_name] for x in self.repos]
    df = pd.DataFrame(ds, columns=['repository'])
    return df
Returns a DataFrame of the repo names present in this project directory.

:return: DataFrame
def _load_from_file(self, filename):
    if not tf.gfile.Exists(filename):
        raise ValueError("File %s not found" % filename)
    with tf.gfile.Open(filename) as f:
        self._load_from_file_object(f)
Load from a vocab file.
def _do_add_floating_ip_asr1k(self, floating_ip, fixed_ip, vrf, ex_gw_port):
    vlan = ex_gw_port['hosting_info']['segmentation_id']
    hsrp_grp = ex_gw_port[ha.HA_INFO]['group']

    LOG.debug("add floating_ip: %(fip)s, fixed_ip: %(fixed_ip)s, "
              "vrf: %(vrf)s, ex_gw_port: %(port)s",
              {'fip': floating_ip, 'fixed_ip': fixed_ip,
               'vrf': vrf, 'port': ex_gw_port})

    confstr = (asr1k_snippets.SET_STATIC_SRC_TRL_NO_VRF_MATCH %
               (fixed_ip, floating_ip, vrf, hsrp_grp, vlan))
    self._edit_running_config(confstr, 'SET_STATIC_SRC_TRL_NO_VRF_MATCH')
To implement a floating ip, an ip static nat is configured in the
underlying router.

ex_gw_port contains data to derive the vlan associated with the related
subnet for the fixed ip. The vlan in turn is applied to the redundancy
parameter for setting the IP NAT.
def autodiscover():
    import copy
    from django.conf import settings
    from django.utils.importlib import import_module

    for app in settings.INSTALLED_APPS:
        before_import_registry = copy.copy(gargoyle._registry)
        try:
            import_module('%s.gargoyle' % app)
        except Exception:
            # Reset the registry to the state before the failed import,
            # since a partial import may leave it half-registered.
            gargoyle._registry = before_import_registry

    __import__('gargoyle.builtins')
Auto-discover INSTALLED_APPS gargoyle.py modules and fail silently when
not present. This forces an import on them to register any gargoyle bits
they may want.
def length_curve(obj):
    if not isinstance(obj, abstract.Curve):
        raise GeomdlException("Input shape must be an instance of abstract.Curve class")
    length = 0.0
    evalpts = obj.evalpts
    num_evalpts = len(obj.evalpts)
    for idx in range(num_evalpts - 1):
        length += linalg.point_distance(evalpts[idx], evalpts[idx + 1])
    return length
Computes the approximate length of the parametric curve.

Uses the following equation to compute the approximate length, summing
the distances between consecutive evaluated points:

.. math::

    \sum_{i=0}^{n-1} \lVert P_{i+1} - P_{i} \rVert

where :math:`n` is number of evaluated curve points and :math:`P` is the
n-dimensional point.

:param obj: input curve
:type obj: abstract.Curve
:return: length
:rtype: float
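A hedged illustration of the polyline approximation in plain numpy (geomdl's linalg.point_distance is assumed to be the Euclidean distance):

import numpy as np

pts = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)]  # illustrative evaluated points
length = sum(np.linalg.norm(np.subtract(b, a)) for a, b in zip(pts, pts[1:]))
# -> 2.0 for this polyline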
def umount(self, forced=True):
    for child in self._children:
        if hasattr(child, "umount"):
            child.umount(forced)
Umount all mountable distribution points. Defaults to using forced method.
def get_nmr_prize_pool(self, round_num=0, tournament=1):
    tournaments = self.get_competitions(tournament)
    tournaments.sort(key=lambda t: t['number'])
    if round_num == 0:
        t = tournaments[-1]
    else:
        tournaments = [t for t in tournaments if t['number'] == round_num]
        if len(tournaments) == 0:
            raise ValueError("invalid round number")
        t = tournaments[0]
    return t['prizePoolNmr']
Get NMR prize pool for the given round and tournament.

Args:
    round_num (int, optional): The round you are interested in,
        defaults to current round.
    tournament (int, optional): ID of the tournament, defaults to 1

Returns:
    decimal.Decimal: prize pool in NMR

Raises:
    ValueError: in case of invalid round number
def remove_behaviour(self, behaviour):
    if not self.has_behaviour(behaviour):
        raise ValueError("This behaviour is not registered")
    index = self.behaviours.index(behaviour)
    self.behaviours[index].kill()
    self.behaviours.pop(index)
Removes a behaviour from the agent. The behaviour is first killed.

Args:
    behaviour (spade.behaviour.CyclicBehaviour): the behaviour
        instance to be removed
def simple_state_machine():
    from random import random
    from furious.async import Async

    number = random()
    logging.info('Generating a number... %s', number)

    if number > 0.25:
        logging.info('Continuing to do stuff.')
        return Async(target=simple_state_machine)

    return number
Pick a number; if it is more than some cutoff, continue the chain.
def listEverything(matching=False):
    pages = pageNames()
    if matching:
        pages = [x for x in pages if matching in x]
    for i, page in enumerate(pages):
        pages[i] = "%s%s (%s)" % (pageFolder(page), page, getPageType(page))
    print("\n".join(sorted(pages)))
Prints every page in the project to the console.

Args:
    matching (str, optional): if given, only return names with this
        string in it
def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
    if csv_path:
        with tf.io.gfile.GFile(csv_path) as csv_f:
            reader = csv.DictReader(csv_f)
            data = [(row["image"], int(row["level"]))
                    for row in reader
                    if csv_usage is None or row["Usage"] == csv_usage]
    else:
        data = [(fname[:-5], -1)
                for fname in tf.io.gfile.listdir(images_dir_path)
                if fname.endswith(".jpeg")]
    for name, label in data:
        yield {
            "name": name,
            "image": _resize_image_if_necessary(
                tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
                                  mode="rb"),
                target_pixels=self.builder_config.target_pixels),
            "label": label,
        }
Yields Example instances from given CSV.

Args:
    images_dir_path: path to dir in which images are stored.
    csv_path: optional, path to csv file with two columns: name of image
        and label. If not provided, just scan image directory, don't set
        labels.
    csv_usage: optional, subset of examples from the csv file to use
        based on the "Usage" column from the csv.
def _deregister_config_file(self, key):
    state = self.__load_state()
    if 'remove_configs' not in state:
        state['remove_configs'] = {}
    state['remove_configs'][key] = state['config_files'].pop(key)
    self.__dump_state(state)
Deregister a previously registered config file. The caller should ensure that it was previously registered.
def solve(self, solver=None, solverparameters=None):
    if self.F is None:
        raise Exception("Relaxation is not generated yet. Call "
                        "'SdpRelaxation.get_relaxation' first")
    solve_sdp(self, solver, solverparameters)
Call a solver on the SDP relaxation. Upon successful solution, it
returns the primal and dual objective values along with the solution
matrices. It also sets these values in the `sdpRelaxation` object,
along with some status information.

:param sdpRelaxation: The SDP relaxation to be solved.
:type sdpRelaxation: :class:`ncpol2sdpa.SdpRelaxation`.
:param solver: The solver to be called, either `None`, "sdpa", "mosek",
    "cvxpy", "scs", or "cvxopt". The default is `None`, which triggers
    autodetect.
:type solver: str.
:param solverparameters: Parameters to be passed to the solver. Actual
    options depend on the solver:

    SDPA:

    - `"executable"`: Specify the executable for SDPA. E.g.,
      `"executable":"/usr/local/bin/sdpa"`, or `"executable":"sdpa_gmp"`
    - `"paramsfile"`: Specify the parameter file

    Mosek: Refer to the Mosek documentation. All arguments are passed on.

    Cvxopt: Refer to the PICOS documentation. All arguments are passed on.

    Cvxpy: Refer to the Cvxpy documentation. All arguments are passed on.

    SCS: Refer to the Cvxpy documentation. All arguments are passed on.
:type solverparameters: dict of str.
def nan_empty(self, col: str):
    try:
        self.df[col] = self.df[col].replace('', nan)
        self.ok("Filled empty values with nan in column " + col)
    except Exception as e:
        self.err(e, "Can not fill empty values with nan")
Fill empty values with NaN values

:param col: name of the column
:type col: str

:example: ``ds.nan_empty("mycol")``
def set_size(self, w, h):
    self.attributes['width'] = str(w)
    self.attributes['height'] = str(h)
Sets the rectangle size.

Args:
    w (int): width of the rectangle
    h (int): height of the rectangle
def get(self, request, *args, **kwargs):
    serializer_class = self.get_serializer_class()
    context = self.get_serializer_context()
    services = []
    for service_type in SERVICES.keys():
        services.append(
            serializer_class(
                object(),
                context=context,
                service_type=service_type
            ).data
        )
    return Response(services)
return list of open 311 services
def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False):
    ha_conn = _get_conn(socket)
    ha_cmd = haproxy.cmds.listServers(backend=backend)
    return ha_conn.sendCmd(ha_cmd, objectify=objectify)
List servers in haproxy backend.

backend
    haproxy backend

socket
    haproxy stats socket, default ``/var/run/haproxy.sock``

CLI Example:

.. code-block:: bash

    salt '*' haproxy.list_servers mysql
def trim_seqs(input_seqs, trim_len, left_trim_len):
    logger = logging.getLogger(__name__)
    okseqs = 0
    totseqs = 0

    if trim_len < -1:
        raise ValueError("Invalid trim_len: %d" % trim_len)

    for label, seq in input_seqs:
        totseqs += 1

        if trim_len == -1:
            okseqs += 1
            yield label, seq
        elif len(seq) >= trim_len:
            okseqs += 1
            yield label, seq[left_trim_len:trim_len]

    if okseqs < 0.01 * totseqs:
        errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
                 'than the trim length (%d). ' \
                 'Are you using the correct -t trim length?' \
                 % (totseqs - okseqs, totseqs, trim_len)
        logger.warn(errmsg)
        warnings.warn(errmsg, UserWarning)
    else:
        logger.debug('trimmed to length %d (%d / %d remaining)' %
                     (trim_len, okseqs, totseqs))
Trim FASTA sequences to specified length.

Parameters
----------
input_seqs : iterable of (str, str)
    The list of input sequences in (label, sequence) format
trim_len : int
    Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
    Sequence trimming from the 5' end. A value of 0 will disable this
    trim.

Returns
-------
Generator of (str, str)
    The trimmed sequences in (label, sequence) format
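A quick usage sketch (records are made up; only the generator above is assumed):

seqs = [('seq1', 'ACGTACGTAC'), ('seq2', 'ACGT')]
trimmed = list(trim_seqs(seqs, trim_len=8, left_trim_len=2))
# -> [('seq1', 'GTACGT')]; 'seq2' is dropped because it is shorter than 8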
def getPagePixmap(doc, pno, matrix=None, colorspace=csRGB, clip=None,
                  alpha=True):
    return doc[pno].getPixmap(matrix=matrix, colorspace=colorspace,
                              clip=clip, alpha=alpha)
Create pixmap of document page by page number.

Notes:
    Convenience function calling page.getPixmap.

Args:
    pno: (int) page number
    matrix: Matrix for transformation (default: Identity).
    colorspace: (str/Colorspace) rgb, cmyk, gray - case ignored, default csRGB.
    clip: (irect-like) restrict rendering to this area.
    alpha: (bool) include alpha channel
def varexp(line):
    ip = get_ipython()
    funcname, name = line.split()
    try:
        import guiqwt.pyplot as pyplot
    except ImportError:
        import matplotlib.pyplot as pyplot
    __fig__ = pyplot.figure()
    __items__ = getattr(pyplot, funcname[2:])(ip.user_ns[name])
    pyplot.show()
    del __fig__, __items__
Spyder's variable explorer magic.

Used to generate plots, histograms and images of the variables displayed
on it.
def write_attribute_adj_list(self, path):
    att_mappings = self.get_attribute_mappings()
    with open(path, mode="w") as file:
        for k, v in att_mappings.items():
            print("{} {}".format(k, " ".join(str(e) for e in v)), file=file)
Write the bipartite attribute graph to a file.

:param str path: Path to the output file.
def tab_insert(self, e):
    cursor = min(self.l_buffer.point, len(self.l_buffer.line_buffer))
    ws = ' ' * (self.tabstop - (cursor % self.tabstop))
    self.insert_text(ws)
    self.finalize()
Insert a tab character.
def time_segments_aggregate(X, interval, time_column, method=['mean']):
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)

    X = X.sort_values(time_column).set_index(time_column)

    if isinstance(method, str):
        method = [method]

    start_ts = X.index.values[0]
    max_ts = X.index.values[-1]

    values = list()
    index = list()
    while start_ts <= max_ts:
        end_ts = start_ts + interval
        subset = X.loc[start_ts:end_ts - 1]
        aggregated = [
            getattr(subset, agg)(skipna=True).values
            for agg in method
        ]
        values.append(np.concatenate(aggregated))
        index.append(start_ts)
        start_ts = end_ts

    return np.asarray(values), np.asarray(index)
Aggregate values over fixed length time segments.
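A hedged usage sketch with made-up integer timestamps (column names are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({'timestamp': [0, 1, 2, 3, 4, 5],
                   'value': [1., 2., 3., 4., 5., 6.]})
values, index = time_segments_aggregate(df, interval=2,
                                        time_column='timestamp')
# values -> [[1.5], [3.5], [5.5]] (mean per 2-step segment)
# index  -> [0, 2, 4] (segment start timestamps)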
def fabrics(self):
    if not self.__fabrics:
        self.__fabrics = Fabrics(self.__connection)
    return self.__fabrics
Gets the Fabrics API client.

Returns:
    Fabrics:
def real(prompt=None, empty=False):
    s = _prompt_input(prompt)
    if empty and not s:
        return None
    else:
        try:
            return float(s)
        except ValueError:
            return real(prompt=prompt, empty=empty)
Prompt a real number.

Parameters
----------
prompt : str, optional
    Use an alternative prompt.
empty : bool, optional
    Allow an empty response.

Returns
-------
float or None
    A float if the user entered a valid real number. None if the user
    pressed only Enter and ``empty`` was True.
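A hedged usage sketch (the prompt string is illustrative):

value = real('Enter a threshold: ', empty=True)
# -> a float, or None if the user just pressed Enter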
def form_valid(self, form):
    instance = form.save(commit=False)
    if hasattr(self.request, 'user'):
        instance.user = self.request.user
    if settings.CONTACT_FORM_FILTER_MESSAGE:
        instance.message = bleach.clean(
            instance.message,
            tags=settings.CONTACT_FORM_ALLOWED_MESSAGE_TAGS,
            strip=settings.CONTACT_FORM_STRIP_MESSAGE
        )
    instance.ip = get_user_ip(self.request)
    instance.site = self.site
    instance.save()
    if settings.CONTACT_FORM_USE_SIGNALS:
        contact_form_valid.send(
            sender=self,
            event=self.valid_event,
            ip=instance.ip,
            site=self.site,
            sender_name=instance.sender_name,
            sender_email=instance.sender_email,
            email=instance.subject.department.email,
            subject=instance.subject.title,
            message=instance.message
        )
    return super(ContactFormView, self).form_valid(form)
This is what's called when the form is valid.
def is_same_as(df, df_to_compare, **kwargs):
    try:
        tm.assert_frame_equal(df, df_to_compare, **kwargs)
    except AssertionError as exc:
        six.raise_from(AssertionError("DataFrames are not equal"), exc)
    return df
Assert that two pandas dataframes are equal.

Parameters
==========
df : pandas DataFrame
df_to_compare : pandas DataFrame
**kwargs : dict
    keyword arguments passed through to pandas' ``assert_frame_equal``

Returns
=======
df : DataFrame
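A hedged usage sketch (the frames are made up; note the function returns the first frame, so it can be chained):

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
is_same_as(df, df.copy())  # passes, returns df
# is_same_as(df, pd.DataFrame({'a': [1, 3]}))  # would raise AssertionError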
def _get_default_retry_params():
    default = getattr(_thread_local_settings, 'default_retry_params', None)
    if default is None or not default.belong_to_current_request():
        return RetryParams()
    else:
        return copy.copy(default)
Get default RetryParams for current request and current thread.

Returns:
    A new instance of the default RetryParams.
def normalize(text, lowercase=True, collapse=True, latinize=False,
              ascii=False, encoding_default='utf-8', encoding=None,
              replace_categories=UNICODE_CATEGORIES):
    text = stringify(text, encoding_default=encoding_default,
                     encoding=encoding)
    if text is None:
        return

    if lowercase:
        text = text.lower()

    if ascii:
        text = ascii_text(text)
    elif latinize:
        text = latinize_text(text)
    if text is None:
        return

    text = category_replace(text, replace_categories)

    if collapse:
        text = collapse_spaces(text)
    return text
The main normalization function for text. This will take a string and
apply a set of transformations to it so that it can be processed more
easily afterwards.

Arguments:

* ``lowercase``: not very mysterious.
* ``collapse``: replace multiple whitespace-like characters with a
  single whitespace. This is especially useful with category replacement
  which can lead to a lot of whitespace.
* ``decompose``: apply a unicode normalization (NFKD) to separate simple
  characters and their diacritics.
* ``replace_categories``: This will perform a replacement of whole
  classes of unicode characters (e.g. symbols, marks, numbers) with a
  given character. It is used to replace any non-text elements of the
  input string.
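A hedged call sketch (exact outputs depend on the module's UNICODE_CATEGORIES mapping, so none are asserted here):

normalize('  Hello,   WORLD!  ')   # lowercased, categories replaced, spaces collapsed
normalize('Björk', latinize=True)  # transliterated toward latin characters
normalize(None)                    # returns None (stringify yields None)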
def decode(self, key):
    key = BucketKey.decode(key)
    if key.uuid != self.uuid:
        raise ValueError("%s is not a bucket corresponding to this limit" %
                         key)
    return key.params
Given a bucket key, compute the parameters used to compute that key.

Note: Deprecated. Use BucketKey.decode() instead.

:param key: The bucket key. Note that the UUID must match the UUID of
    this limit; a ValueError will be raised if this is not the case.
def preloop(self):
    if not self.parser:
        self.stdout.write("Welcome to imagemounter {version}".format(version=__version__))
        self.stdout.write("\n")
        self.parser = ImageParser()
        for p in self.args.paths:
            self.onecmd('disk "{}"'.format(p))
if the parser is not already set, loads the parser.
def install_requirements(self, path, index=None):
    cmd = 'install -r {0}'.format(path)
    if index:
        cmd = 'install --index-url {0} -r {1}'.format(index, path)
    self.pip(cmd)
Install packages from a requirements.txt file.

Args:
    path (str): The path to the requirements file.
    index (str): The URL for a pypi index to use.
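A hedged usage sketch (`env` is a hypothetical instance of the class exposing this method):

env.install_requirements('requirements.txt')
env.install_requirements('requirements.txt', index='https://pypi.org/simple')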
def get_entry_compact_text_repr(entry, entries):
    text = get_shortest_text_value(entry)
    if text is not None:
        return text
    else:
        sources = get_sourced_from(entry)
        if sources is not None:
            texts = []
            for source in sources:
                source_entry = entries[source]
                texts.append(get_shortest_text_value(source_entry))
            return get_shortest_string(texts)
If the entry has a text value, return that. If the entry has a
sourced_from value, return the text value of the source. Otherwise,
return None.
def write(self, data):
    _complain_ifclosed(self.closed)
    if self.__encoding:
        self.f.write(data.encode(self.__encoding, self.__errors))
        return len(data)
    else:
        return self.f.write(data)
Write ``data`` to the file.

:type data: bytes
:param data: the data to be written to the file
:rtype: int
:return: the number of bytes written
def clean(cls, path):
    for pth in os.listdir(path):
        pth = os.path.abspath(os.path.join(path, pth))
        if os.path.isdir(pth):
            logger.debug('Removing directory %s' % pth)
            shutil.rmtree(pth)
        else:
            logger.debug('Removing file %s' % pth)
            os.remove(pth)
Clean up all the files in a provided path
def match(self, name):
    if (self.ns + self.name).startswith(name):
        return True
    for alias in self.aliases:
        if (self.ns + alias).startswith(name):
            return True
Compare an argument string to the task name.
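A hedged illustration of the prefix matching (attribute values are made up):

# Suppose task.ns == 'db.', task.name == 'migrate', task.aliases == ['sync']
task.match('db.mig')   # -> True  ('db.migrate'.startswith('db.mig'))
task.match('db.sync')  # -> True  (matched via the alias)
task.match('web')      # -> None  (falsy: no explicit False is returned)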
def run(self):
    self._listening_sock, self._address = (
        bind_domain_socket(self._address)
        if self._uds_path else bind_tcp_socket(self._address))
    if self._ssl:
        certfile = os.path.join(os.path.dirname(__file__), 'server.pem')
        self._listening_sock = _ssl.wrap_socket(
            self._listening_sock, certfile=certfile, server_side=True)
    self._accept_thread = threading.Thread(target=self._accept_loop)
    self._accept_thread.daemon = True
    self._accept_thread.start()
    return self.port
Begin serving. Returns the bound port, or 0 for domain socket.
def alterar(self, id_divisiondc, name):
    if not is_valid_int_param(id_divisiondc):
        raise InvalidParameterError(
            u'The identifier of Division Dc is invalid or was not informed.')

    url = 'divisiondc/' + str(id_divisiondc) + '/'

    division_dc_map = dict()
    division_dc_map['name'] = name

    code, xml = self.submit({'division_dc': division_dc_map}, 'PUT', url)

    return self.response(code, xml)
Change Division Dc by the identifier.

:param id_divisiondc: Identifier of the Division Dc. Integer value and greater than zero.
:param name: Division Dc name. String with a minimum 2 and maximum of 80 characters

:return: None

:raise InvalidParameterError: The identifier of Division Dc or name is null or invalid.
:raise NomeDivisaoDcDuplicadoError: There is already a registered Division Dc with the value of name.
:raise DivisaoDcNaoExisteError: Division Dc not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
def _connect_lxd(spec):
    return {
        'method': 'lxd',
        'kwargs': {
            'container': spec.remote_addr(),
            'python_path': spec.python_path(),
            'lxc_path': spec.mitogen_lxc_path(),
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
Return ContextService arguments for an LXD container connection.
def parse_lint_result(lint_result_path, manifest_path):
    # Raw string avoids invalid-escape warnings for the regex backslashes.
    unused_string_pattern = re.compile(r'The resource `R\.string\.([^`]+)` appears to be unused')
    manifest_string_refs = get_manifest_string_refs(manifest_path)
    root = etree.parse(lint_result_path).getroot()
    issues = []

    for issue_xml in root.findall('.//issue[@id="UnusedResources"]'):
        message = issue_xml.get('message')
        unused_string = re.match(unused_string_pattern, issue_xml.get('message'))
        has_string_in_manifest = unused_string and unused_string.group(1) in manifest_string_refs
        if not has_string_in_manifest:
            issues.extend(_get_issues_from_location(UnusedResourceIssue,
                                                    issue_xml.findall('location'),
                                                    message))

    for issue_xml in root.findall('.//issue[@id="ExtraTranslation"]'):
        message = issue_xml.get('message')
        if re.findall(ExtraTranslationIssue.pattern, message):
            issues.extend(_get_issues_from_location(ExtraTranslationIssue,
                                                    issue_xml.findall('location'),
                                                    message))

    return issues
Parse lint-result.xml and create Issue for every problem found except unused strings referenced in AndroidManifest
def add_authorizers(self, authorizers):
    self.security_definitions = self.security_definitions or {}
    for authorizer_name, authorizer in authorizers.items():
        self.security_definitions[authorizer_name] = authorizer.generate_swagger()
Add Authorizer definitions to the securityDefinitions part of Swagger.

:param dict authorizers: Mapping of authorizer name to Authorizer
    configuration, which gets translated to securityDefinitions.
def delete_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
    tenant_name = fw_dict.get('tenant_name')
    ret = self._delete_service_nwk(tenant_id, tenant_name, 'out')
    if ret:
        res = fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS
        LOG.info("out Service network deleted for tenant %s", tenant_id)
    else:
        res = fw_const.DCNM_OUT_NETWORK_DEL_FAIL
        LOG.info("out Service network delete failed for tenant %s",
                 tenant_id)
    self.update_fw_db_result(tenant_id, dcnm_status=res)
    return ret
Delete the DCNM OUT network and update the result.
def reportDeprecatedWorkerNameUsage(message, stacklevel=None, filename=None,
                                    lineno=None):
    if filename is None:
        if stacklevel is None:
            stacklevel = 3
        else:
            stacklevel += 2
        warnings.warn(DeprecatedWorkerNameWarning(message), None, stacklevel)
    else:
        assert stacklevel is None
        if lineno is None:
            lineno = 0
        warnings.warn_explicit(
            DeprecatedWorkerNameWarning(message),
            DeprecatedWorkerNameWarning,
            filename, lineno)
Hook that is run when an old API name is used.

:param stacklevel: stack level relative to the caller's frame.
    Defaults to caller of the caller of this function.
def copy_attr(f1, f2):
    copyit = lambda x: not hasattr(f2, x) and x[:10] == 'PACKAGING_'
    if f1._tags:
        pattrs = [tag for tag in f1._tags if copyit(tag)]
        for attr in pattrs:
            f2.Tag(attr, f1.GetTag(attr))
copies the special packaging file attributes from f1 to f2.
def elliconstraint(self, x, cfac=1e8, tough=True, cond=1e6):
    N = len(x)
    f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2)
    cvals = (x[0] + 1,
             x[0] + 1 + 100 * x[1],
             x[0] + 1 - 100 * x[1])
    if tough:
        f += cfac * sum(max(0, c) for c in cvals)
    else:
        f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals)
    return f
ellipsoid test objective function with "constraints"
def skull_strip(dset, suffix='_ns', prefix=None, unifize=True):
    if prefix is None:
        prefix = nl.suffix(dset, suffix)
    unifize_dset = nl.suffix(dset, '_u')
    cmd = bet2 if bet2 else 'bet2'
    if unifize:
        info = nl.dset_info(dset)
        if info is None:
            nl.notify('Error: could not read info for dset %s' % dset,
                      level=nl.level.error)
            return False
        cmd = os.path.join(fsl_dir, cmd) if fsl_dir else cmd
        cutoff_value = nl.max(dset) * 0.05
        nl.run(['3dUnifize', '-prefix', unifize_dset,
                nl.calc(dset, 'step(a-%f)*a' % cutoff_value)],
               products=unifize_dset)
    else:
        unifize_dset = dset
    nl.run([cmd, unifize_dset, prefix, '-w', 0.5], products=prefix)
use bet to strip skull from given anatomy
def new_reply(cls, thread, user, content):
    msg = cls.objects.create(thread=thread, sender=user, content=content)
    thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)
    thread.userthread_set.filter(user=user).update(deleted=False, unread=False)
    message_sent.send(sender=cls, message=msg, thread=thread, reply=True)
    return msg
Create a new reply for an existing Thread. Mark thread as unread for all other participants, and mark thread as read by replier.
def _guess_cmd_name(self, cmd):
    if cmd[0] == 'zcat' and 'bowtie' in cmd:
        return 'bowtie'
    if cmd[0] == 'samtools':
        return ' '.join(cmd[0:2])
    if cmd[0] == 'java':
        jars = [s for s in cmd if '.jar' in s]
        return os.path.basename(jars[0].replace('.jar', ''))
    return cmd[0]
Manually guess some known command names, where we can do a better job than the automatic parsing.
def oneImageNLF(img, img2=None, signal=None):
    x, y, weights, signal = calcNLF(img, img2, signal)
    _, fn, _ = _evaluate(x, y, weights)
    return fn, signal
Estimate the NLF from one or two images of the same kind
def _is_simple_type(value):
    return isinstance(value, six.string_types) or \
        isinstance(value, (int, float, bool))
Returns True if the given parameter value is an instance of either int, str, float or bool.
def qnm_freq_decay(f_0, tau, decay):
    q_0 = pi * f_0 * tau
    alpha = 1. / decay
    alpha_sq = 1. / decay / decay
    q_sq = (alpha_sq + 4*q_0*q_0 +
            alpha*numpy.sqrt(alpha_sq + 16*q_0*q_0)) / 4.
    return numpy.sqrt(q_sq) / pi / tau
Return the frequency at which the amplitude of the ringdown falls to
decay of the peak amplitude.

Parameters
----------
f_0 : float
    The ringdown-frequency, which gives the peak amplitude.
tau : float
    The damping time of the sinusoid.
decay : float
    The fraction of the peak amplitude.

Returns
-------
f_decay : float
    The frequency at which the amplitude of the frequency-domain
    ringdown falls to decay of the peak amplitude.
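A hedged numeric sketch (the values are illustrative, not from the source):

f_dec = qnm_freq_decay(f_0=250.0, tau=0.01, decay=0.5)
# -> the frequency (Hz) at which the frequency-domain ringdown amplitude
#    drops to half of its peak value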
def _maximization(self, X):
    for i in range(self.k):
        resp = np.expand_dims(self.responsibility[:, i], axis=1)
        mean = (resp * X).sum(axis=0) / resp.sum()
        covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum()
        self.parameters[i]["mean"], self.parameters[i]["cov"] = mean, covariance

    n_samples = np.shape(X)[0]
    self.priors = self.responsibility.sum(axis=0) / n_samples
Update the parameters and priors (the maximization step of EM, computed from the current responsibilities).
def get_info(self, symbol):
    sym = self._get_symbol_info(symbol)
    if not sym:
        raise NoDataFoundException("Symbol does not exist.")
    ret = {}
    ret['chunk_count'] = sym[CHUNK_COUNT]
    ret['len'] = sym[LEN]
    ret['appended_rows'] = sym[APPEND_COUNT]
    ret['metadata'] = sym[METADATA] if METADATA in sym else None
    ret['chunker'] = sym[CHUNKER]
    ret['chunk_size'] = sym[CHUNK_SIZE] if CHUNK_SIZE in sym else 0
    ret['serializer'] = sym[SERIALIZER]
    return ret
Returns information about the symbol, in a dictionary.

Parameters
----------
symbol: str
    the symbol for the given item in the DB

Returns
-------
dictionary
def _DownloadScript(self, url, dest_dir):
    if url.startswith(r'gs://'):
        url = re.sub('^gs://', 'https://storage.googleapis.com/', url)
        return self._DownloadAuthUrl(url, dest_dir)

    header = r'http[s]?://'
    domain = r'storage\.googleapis\.com'
    bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
    obj = r'(?P<obj>[^\*\?]+)'

    # Virtual-hosted style: http(s)://<bucket>.storage.googleapis.com/<object>
    gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
    match = gs_regex.match(url)
    if match:
        return self._DownloadAuthUrl(url, dest_dir)

    # Path style: http(s)://storage.googleapis.com/<bucket>/<object>
    gs_regex = re.compile(
        r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
    match = gs_regex.match(url)
    if match:
        return self._DownloadAuthUrl(url, dest_dir)

    return self._DownloadUrl(url, dest_dir)
Download the contents of the URL to the destination.

Args:
    url: string, the URL to download.
    dest_dir: string, the path to a directory for storing metadata
        scripts.

Returns:
    string, the path to the file storing the metadata script.
def GetSqlValuesTuple(self, trip_id):
    result = []
    for fn in self._SQL_FIELD_NAMES:
        if fn == 'trip_id':
            result.append(trip_id)
        else:
            result.append(getattr(self, fn))
    return tuple(result)
Return a tuple that outputs a row of _SQL_FIELD_NAMES to be written to a
SQLite database.

Arguments:
    trip_id: The trip_id of the trip to which this StopTime corresponds.
        It must be provided, as it is not stored in StopTime.
def flush(self, using=None, **kwargs):
    return self._get_connection(using).indices.flush(index=self._name,
                                                     **kwargs)
Performs a flush operation on the index.

Any additional keyword arguments will be passed to
``Elasticsearch.indices.flush`` unchanged.
def device_info(self):
    return {
        'family': self.family,
        'platform': self.platform,
        'os_type': self.os_type,
        'os_version': self.os_version,
        'udi': self.udi,
        'driver_name': self.driver.platform,
        'mode': self.mode,
        'is_console': self.is_console,
        'is_target': self.is_target,
        'hostname': self.hostname,
    }
Return device info dict.
def save_snapshot(self, si, logger, vm_uuid, snapshot_name, save_memory):
    vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)

    snapshot_path_to_be_created = SaveSnapshotCommand._get_snapshot_name_to_be_created(snapshot_name, vm)

    save_vm_memory_to_snapshot = SaveSnapshotCommand._get_save_vm_memory_to_snapshot(save_memory)

    SaveSnapshotCommand._verify_snapshot_uniquness(snapshot_path_to_be_created, vm)

    task = self._create_snapshot(logger, snapshot_name, vm, save_vm_memory_to_snapshot)

    self.task_waiter.wait_for_task(task=task, logger=logger,
                                   action_name='Create Snapshot')

    return snapshot_path_to_be_created
Creates a snapshot of the current state of the virtual machine.

:param si: py_vmomi service instance
:type si: vim.ServiceInstance
:param logger: Logger
:type logger: cloudshell.core.logger.qs_logger.get_qs_logger
:param vm_uuid: UUID of the virtual machine
:type vm_uuid: str
:param snapshot_name: Snapshot name to save the snapshot to
:type snapshot_name: str
:param save_memory: Snapshot the virtual machine's memory. Lookup, Yes / No
:type save_memory: str
def render_pep440(vcs):
    if vcs is None:
        return None
    tags = vcs.split('-')
    if len(tags) == 1:
        return tags[0]
    else:
        return tags[0] + '+' + '.'.join(tags[1:])
Convert git release tag into a form that is PEP440 compliant.
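A hedged illustration of the transformation (inputs mimic `git describe` output):

render_pep440('1.2.3')            # -> '1.2.3'
render_pep440('1.2.3-4-gabcdef')  # -> '1.2.3+4.gabcdef'
render_pep440(None)               # -> None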
def plan_scripts(self):
    if not self.__plan_scripts:
        self.__plan_scripts = PlanScripts(self.__connection)
    return self.__plan_scripts
Gets the Plan Scripts API client.

Returns:
    PlanScripts:
def method(self, returns, **parameter_types):
    @wrapt.decorator
    def type_check_wrapper(method, instance, args, kwargs):
        if instance is not None:
            raise Exception("Instance shouldn't be set.")
        parameter_names = inspect.getargspec(method).args
        defaults = inspect.getargspec(method).defaults
        parameters = self._collect_parameters(parameter_names, args, kwargs,
                                              defaults)
        parameter_checker.check_types(parameters, parameter_types,
                                      self._strict_floats)
        result = method(*args, **kwargs)
        parameter_checker.check_return_type(result, returns,
                                            self._strict_floats)
        return result

    def register_method(method):
        parameter_names = inspect.getargspec(method).args
        parameter_checker.check_type_declaration(parameter_names,
                                                 parameter_types)
        wrapped_method = type_check_wrapper(method, None, None, None)
        fully_qualified_name = "{}.{}".format(method.__module__,
                                              method.__name__)
        self.register(fully_qualified_name, wrapped_method,
                      MethodSignature.create(parameter_names, parameter_types,
                                             returns))
        return wrapped_method

    return register_method
Syntactic sugar for registering a method

Example:

>>> registry = Registry()
>>> @registry.method(returns=int, x=int, y=int)
... def add(x, y):
...     return x + y

:param returns: The method's return type
:type returns: type
:param parameter_types: The types of the method's parameters
:type parameter_types: dict[str, type]

.. versionadded:: 0.1.0
def write_sources_file():
    file_content = (
        'schemes: '
        'https://github.com/chriskempson/base16-schemes-source.git\n'
        'templates: '
        'https://github.com/chriskempson/base16-templates-source.git'
    )
    file_path = rel_to_cwd('sources.yaml')
    with open(file_path, 'w') as file_:
        file_.write(file_content)
Write a sources.yaml file to current working dir.
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
Serialize a representation tree into a YAML stream. If stream is None, return the produced string instead.
def resolve_variables(variables, context, provider):
    for variable in variables:
        variable.resolve(context, provider)
Given a list of variables, resolve all of them.

Args:
    variables (list of :class:`stacker.variables.Variable`): list of
        variables
    context (:class:`stacker.context.Context`): stacker context
    provider (:class:`stacker.provider.base.BaseProvider`): subclass of
        the base provider