Dataset preview header. Columns: "Unnamed: 0" (int64, values 0-389k); "code" (string, lengths 26-79.6k); "docstring" (string, lengths 1-46.9k).
20,900
def camera_list(self, **kwargs): api = self._api_info[] payload = dict({ : self._sid, : api[], : , : api[], }, **kwargs) response = self._get_json_with_retry(api[], payload) cameras = [] for data in response[][]: cameras.append(Camera(data, self._video_stream_url)) return cameras
Return a list of cameras.
20,901
def video_set_callbacks(self, lock, unlock, display, opaque):
    return libvlc_video_set_callbacks(self, lock, unlock, display, opaque)
Set callbacks and private data to render decoded video to a custom area in memory. Use L{video_set_format}() or L{video_set_format_callbacks}() to configure the decoded format. @param lock: callback to lock video memory (must not be NULL). @param unlock: callback to unlock video memory (or NULL if not needed). @param display: callback to display video (or NULL if not needed). @param opaque: private pointer for the three callbacks (as first parameter). @version: LibVLC 1.1.1 or later.
20,902
def main(): if in sys.argv: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg("-WD", default_val=".") input_dir_path = pmag.get_named_arg(, ) if not input_dir_path: input_dir_path = dir_path in_file = pmag.get_named_arg("-f", default_val="sites.txt") in_file = pmag.resolve_file_name(in_file, input_dir_path) if "-ID" not in sys.argv: input_dir_path = os.path.split(in_file)[0] plot_by = pmag.get_named_arg("-obj", default_val="all").lower() spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt") samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt") site_file = pmag.get_named_arg("-fsi", default_val="sites.txt") loc_file = pmag.get_named_arg("-flo", default_val="locations.txt") ignore_tilt = False if in sys.argv: ignore_tilt = True color_map = "coolwarm" if in sys.argv: contour = True if in sys.argv: ind = sys.argv.index() color_map = sys.argv[ind+1] else: color_map = else: contour = False interactive = True save_plots = False if in sys.argv: save_plots = True interactive = False plot_ell = False if in sys.argv: plot_ell = pmag.get_named_arg("-ell", "F") crd = pmag.get_named_arg("-crd", default_val="g") fmt = pmag.get_named_arg("-fmt", "svg") ipmag.eqarea_magic(in_file, dir_path, input_dir_path, spec_file, samp_file, site_file, loc_file, plot_by, crd, ignore_tilt, save_plots, fmt, contour, color_map, plot_ell, "all", interactive)
NAME eqarea_magic.py DESCRIPTION makes equal area projections from declination/inclination data SYNTAX eqarea_magic.py [command line options] INPUT takes magic formatted sites, samples, specimens, or measurements OPTIONS -h prints help message and quits -f FILE: specify input magic format file from magic, default='sites.txt' supported types=[measurements, specimens, samples, sites] -fsp FILE: specify specimen file name, (required if you want to plot measurements by sample) default='specimens.txt' -fsa FILE: specify sample file name, (required if you want to plot specimens by site) default='samples.txt' -fsi FILE: specify site file name, default='sites.txt' -flo FILE: specify location file name, default='locations.txt' -obj OBJ: specify level of plot [all, sit, sam, spc], default is all -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is geographic, unspecified assumed geographic -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors -c plot as colour contour -cm CM use color map CM [default is coolwarm] -sav save plot and quit quietly -no-tilt data are unoriented, allows plotting of measurement dec/inc NOTE all: entire file; sit: site; sam: sample; spc: specimen
20,903
def _set_query_data_fast_1(self, page): self.data[] = page.get() assessments = page.get() if assessments: self.data[] = assessments extract = page.get() if extract: self.data[] = extract extext = html2text.html2text(extract) if extext: self.data[] = extext.strip() fullurl = page.get() if fullurl: self.data[] = fullurl self.data[] = fullurl + length = page.get() if length: self.data[] = length self._extend_data(, utils.get_links(page.get())) self._update_data(, , page.get()) pageprops = page.get() if pageprops: wikibase = pageprops.get() if wikibase: self.data[] = wikibase self.data[] = utils.wikidata_url(wikibase) if in pageprops: self.data[] = len(self.data[])
set less expensive action=query response data PART 1
20,904
def _merge_points(self, function_address):
    try:
        new_function = self.kb.functions[function_address]
    except KeyError:
        return []
    if function_address not in self._function_merge_points:
        ordered_merge_points = CFGUtils.find_merge_points(
            function_address, new_function.endpoints, new_function.graph)
        self._function_merge_points[function_address] = ordered_merge_points
    return self._function_merge_points[function_address]
Return the ordered merge points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list
20,905
def from_df(cls, df_long, df_short):
    pop = cls(1, 1, 1, 1, 1)
    pop.orbpop_long = OrbitPopulation.from_df(df_long)
    pop.orbpop_short = OrbitPopulation.from_df(df_short)
    return pop
Builds TripleOrbitPopulation from DataFrame ``DataFrame`` objects must be of appropriate form to pass to :func:`OrbitPopulation.from_df`. :param df_long, df_short: :class:`pandas.DataFrame` objects to pass to :func:`OrbitPopulation.from_df`.
20,906
def str_presenter(dmpr, data):
    # The YAML tag and the '|' block style follow the Stack Overflow answer
    # cited in the docstring; multi-line strings are emitted as block scalars.
    if is_multiline(data):
        return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dmpr.represent_scalar('tag:yaml.org,2002:str', data)
Return correct str_presenter to write multiple lines to a yaml field. Source: http://stackoverflow.com/a/33300001
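A minimal, self-contained usage sketch (assuming PyYAML and a simple is_multiline helper standing in for the one the snippet above relies on): once the presenter is registered, multi-line strings are dumped in block (|) style.

import yaml

def is_multiline(s):
    # stand-in for the helper assumed by str_presenter above
    return len(s.splitlines()) > 1

def str_presenter(dmpr, data):
    if is_multiline(data):
        return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dmpr.represent_scalar('tag:yaml.org,2002:str', data)

yaml.add_representer(str, str_presenter)
print(yaml.dump({"msg": "line one\nline two"}))
# msg: |-
#   line one
#   line two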
20,907
def get_tag(self, el):
    name = self.get_tag_name(el)
    return util.lower(name) if name is not None and not self.is_xml else name
Get tag.
20,908
def register(klass): assert(isinstance(klass, type)) name = klass.__name__.lower() if name in Optimizer.opt_registry: warnings.warn( % (klass.__module__, klass.__name__, Optimizer.opt_registry[name].__module__, Optimizer.opt_registry[name].__name__)) Optimizer.opt_registry[name] = klass return klass
Registers a new optimizer. Once an optimizer is registered, we can create an instance of this optimizer with `create_optimizer` later. Examples -------- >>> @mx.optimizer.Optimizer.register ... class MyOptimizer(mx.optimizer.Optimizer): ... pass >>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer') >>> print(type(optim)) <class '__main__.MyOptimizer'>
20,909
def status(self):
    if self._future.running():
        _status = JobStatus.RUNNING
    elif self._future.cancelled():
        _status = JobStatus.CANCELLED
    elif self._future.done():
        _status = JobStatus.DONE if self._future.exception() is None else JobStatus.ERROR
    else:
        _status = JobStatus.INITIALIZING
    return _status
Gets the status of the job by querying the Python's future Returns: qiskit.providers.JobStatus: The current JobStatus Raises: JobError: If the future is in unexpected state concurrent.futures.TimeoutError: if timeout occurred.
20,910
def GetStream(data=None):
    if len(__mstreams_available__) == 0:
        if data:
            mstream = MemoryStream(data)
            mstream.seek(0)
        else:
            mstream = MemoryStream()
        __mstreams__.append(mstream)
        return mstream
    mstream = __mstreams_available__.pop()
    if data is not None and len(data):
        mstream.Cleanup()
        mstream.write(data)
    mstream.seek(0)
    return mstream
Get a MemoryStream instance. Args: data (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: MemoryStream: instance.
20,911
def parse_yaml(self, y): self._targets = [] if in y: for t in y[]: if in t[]: new_target = WaitTime() elif in t[]: new_target = Preceding() else: new_target = Condition() new_target.parse_yaml(t) self._targets.append(new_target) return self
Parse a YAML specification of a message sending object into this object.
20,912
def get_dataset(self, dataset_key):
    try:
        return self._datasets_api.get_dataset(
            *(parse_dataset_key(dataset_key))).to_dict()
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
Retrieve an existing dataset definition This method retrieves metadata about an existing :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :returns: Dataset definition, with all attributes :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> intro_dataset = api_client.get_dataset( ... 'jonloyens/an-intro-to-dataworld-dataset') # doctest: +SKIP >>> intro_dataset['title'] # doctest: +SKIP 'An Intro to data.world Dataset'
20,913
def room(model, solution=None, linear=False, delta=0.03, epsilon=1E-03):
    with model:
        add_room(model=model, solution=solution, linear=linear, delta=delta,
                 epsilon=epsilon)
        solution = model.optimize()
    return solution
Compute a single solution based on regulatory on/off minimization (ROOM). Compute a new flux distribution that minimizes the number of active reactions needed to accommodate a previous reference solution. Regulatory on/off minimization (ROOM) is generally used to assess the impact of knock-outs. Thus the typical usage is to provide a wildtype flux distribution as reference and a model in knock-out state. Parameters ---------- model : cobra.Model The model state to compute a ROOM-based solution for. solution : cobra.Solution, optional A (wildtype) reference solution. linear : bool, optional Whether to use the linear ROOM formulation or not (default False). delta: float, optional The relative tolerance range (additive) (default 0.03). epsilon: float, optional The absolute tolerance range (multiplicative) (default 0.001). Returns ------- cobra.Solution A flux distribution with minimal active reaction changes compared to the reference. See Also -------- add_room : add ROOM constraints and objective
20,914
def secant(a, b, fn, epsilon):
    f1 = fn(a)
    if abs(f1) <= epsilon:
        return a
    f2 = fn(b)
    if abs(f2) <= epsilon:
        return b
    for i in range(100):
        slope = (f2 - f1) / (b - a)
        c = b - f2 / slope
        f3 = fn(c)
        if abs(f3) < epsilon:
            return c
        a = b
        b = c
        f1 = f2
        f2 = f3
    return None
One of the fastest root-finding algorithms. The method calculates the slope of the function fn and this enables it to converge to a solution very fast. However, if started too far away from a root, the method may not converge (returning a None). For this reason, it is recommended that this function be used first in any guess-and-check workflow and, if it fails to find a root, the bisect() method should be used. Args: a: The lowest possible boundary of the value you are trying to find. b: The highest possible boundary of the value you are trying to find. fn: A function representing the relationship between the value you are trying to find and the target condition you are trying to satisfy. It should typically be structured in the following way: `def fn(value_trying_to_find): funct(value_trying_to_find) - target_desired_from_funct` ...but the subtraction should be switched if value_trying_to_find has a negative relationship with the funct. epsilon: The acceptable error in the target_desired_from_funct. Returns: root: The value that gives the target_desired_from_funct. References ---------- [1] Wikipedia contributors. (2018, December 29). Root-finding algorithm. In Wikipedia, The Free Encyclopedia. Retrieved 18:16, December 30, 2018, from https://en.wikipedia.org/wiki/Root-finding_algorithm#Secant_method
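For illustration, a hedged usage sketch of the guess-and-check workflow described above, with a made-up target (find x such that x**2 equals 10); bisect() is the fallback the docstring itself recommends.

def fn(x):
    return x ** 2 - 10  # funct(value_trying_to_find) - target_desired_from_funct

root = secant(a=1.0, b=5.0, fn=fn, epsilon=1e-6)
if root is None:
    # secant failed to converge; fall back to the slower but robust bisection search
    root = bisect(a=1.0, b=5.0, fn=fn, epsilon=1e-6)
print(root)  # approximately 3.1623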
20,915
def Lorentzian(x, a, x0, sigma, y0):
    return a / (1 + ((x - x0) / sigma) ** 2) + y0
Lorentzian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: half width at half maximum ``y0``: additive constant Formula: -------- ``a/(1+((x-x0)/sigma)^2)+y0``
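A small numeric sketch of the formula with hypothetical parameter values, showing the extremal value a + y0 at the center and a/2 + y0 at x0 +/- sigma.

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
y = Lorentzian(x, a=2.0, x0=0.0, sigma=1.0, y0=0.5)
# y == [1.5, 2.5, 1.5]: peak of a + y0 = 2.5 at x0, half-maximum 1.5 at x0 +/- sigma
print(y)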
20,916
def reduce_l1(attrs, inputs, proto_obj): new_attrs = translation_utils._fix_attribute_names(attrs, {:}) new_attrs = translation_utils._add_extra_attributes(new_attrs, { : 1}) return , new_attrs, inputs
Reduce input tensor by l1 normalization.
20,917
def assemble(self,roboset=None,color=None,format=None,bgset=None,sizex=300,sizey=300): if roboset == : if color in self.colors: roboset = + color else: randomcolor = self.colors[self.hasharray[0] % len(self.colors) ] roboset = + randomcolor roboparts = self._get_list_of_files(self.resourcedir + + roboset) if format == : r, g, b, a = roboimg.split() roboimg = Image.merge("RGB", (r, g, b)) if bgset is not None: bg = Image.open(background) bg = bg.resize((1024,1024)) bg.paste(roboimg,(0,0),roboimg) roboimg = bg self.img = roboimg.resize((sizex,sizey),Image.ANTIALIAS) self.format = format
Build our Robot! Returns the robot image itself.
20,918
def resize(self, width, height):
    if not self.fbo:
        return
    self.width = width // self.widget.devicePixelRatio()
    self.height = height // self.widget.devicePixelRatio()
    self.buffer_width = width
    self.buffer_height = height
    super().resize(width, height)
Pyqt specific resize callback.
20,919
def scolor(self):
    global palette
    color = palette[self.color_index]
    if len(palette) - 1 == self.color_index:
        self.color_index = 0
    else:
        self.color_index += 1
    self.color(color)
Set a unique color from a series
20,920
def get(self, *raw_args, **raw_kwargs): args = self.prepare_args(*raw_args) kwargs = self.prepare_kwargs(**raw_kwargs) key = self.key(*args, **kwargs) item = self.cache.get(key) call = Call(args=raw_args, kwargs=raw_kwargs) if item is None: if self.should_missing_item_be_fetched_synchronously(*args, **kwargs): logger.debug(("Job %s with key - cache MISS - running " "synchronous refresh"), self.class_path, key) result = self.refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.MISS, sync_fetch=True) else: logger.debug(("Job %s with key - cache MISS - triggering " "async refresh and returning empty result"), self.class_path, key) result = self.empty() self.store(key, self.timeout(*args, **kwargs), result) self.async_refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.MISS, sync_fetch=False) expiry, data = item delta = time.time() - expiry if delta > 0: if self.should_stale_item_be_fetched_synchronously( delta, *args, **kwargs): logger.debug( ("Job %s with key - STALE cache hit - running " "synchronous refresh"), self.class_path, key) result = self.refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.STALE, sync_fetch=True) else: logger.debug( ("Job %s with key - STALE cache hit - triggering " "async refresh and returning stale result"), self.class_path, key) timeout = self.timeout(*args, **kwargs) self.store(key, timeout, data) self.async_refresh(*args, **kwargs) return self.process_result( data, call=call, cache_status=self.STALE, sync_fetch=False) else: logger.debug("Job %s with key - cache HIT", self.class_path, key) return self.process_result(data, call=call, cache_status=self.HIT)
Return the data for this function (using the cache if possible). This method is not intended to be overridden
20,921
def _get_template_list(self): " Get the hierarchy of templates belonging to the object/box_type given. " t_list = [] if hasattr(self.obj, ) and self.obj.category_id: cat = self.obj.category base_path = % (cat.path, self.name) if hasattr(self.obj, ): t_list.append(base_path + % (self.obj.slug, self.box_type,)) t_list.append(base_path + % (self.box_type,)) t_list.append(base_path + ) base_path = % self.name if hasattr(self.obj, ): t_list.append(base_path + % (self.obj.slug, self.box_type,)) t_list.append(base_path + % (self.box_type,)) t_list.append(base_path + ) t_list.append( % self.box_type) t_list.append() return t_list
Get the hierarchy of templates belonging to the object/box_type given.
20,922
def annot_heatmap(ax,dannot, xoff=0,yoff=0, kws_text={}, annot_left=,annot_right=, annothalf=, ): for xtli,xtl in enumerate(ax.get_xticklabels()): xtl=xtl.get_text() for ytli,ytl in enumerate(ax.get_yticklabels()): ytl=ytl.get_text() if annothalf==: ax.text(xtli+0.5+xoff,ytli+0.5+yoff,dannot.loc[xtl,ytl],**kws_text,ha=) else: ax.text(ytli+0.5+yoff,xtli+0.5+xoff,dannot.loc[xtl,ytl],**kws_text,ha=) return ax
kws_text={'marker','s','linewidth','facecolors','edgecolors'}
20,923
def set_forbidden_uptodate(self, uptodate):
    if self._forbidden_uptodate == uptodate:
        return
    self._forbidden_uptodate = uptodate
    self.invalidateFilter()
Set all forbidden uptodate values :param uptodate: a list with forbidden uptodate values :type uptodate: list :returns: None :rtype: None :raises: None
20,924
def __getRefererUrl(self, url=None): if url is None: url = "http://www.arcgis.com/sharing/rest/portals/self" params = { "f" : "json", "token" : self.token } val = self._get(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port) self._referer_url = "arcgis.com" self._token = None return self._referer_url
gets the referer url for the token handler
20,925
def extract_db_info(self, obj, keys): objl = self.convert_in(obj) if isinstance(objl, self.__class__): return objl.update_meta_info() try: with builtins.open(objl, mode=) as fd: state = json.load(fd) except IOError as e: raise e result = super(BaseStructuredCalibration, self).extract_db_info(state, keys) try: minfo = state[] result[] = minfo[] origin = minfo[] date_obs = origin[] except KeyError: origin = {} date_obs = "1970-01-01T00:00:00.00" result[] = state[] result[] = state[] result[] = state[] result[] = state[] result[] = conv.convert_date(date_obs) result[] = origin return result
Extract metadata from serialized file
20,926
def prior_transform(self, unit_coords, priors, prior_args=[]):
    theta = []
    for i, (u, p) in enumerate(zip(unit_coords, priors)):
        func = p.unit_transform
        try:
            kwargs = prior_args[i]
        except(IndexError):
            kwargs = {}
        theta.append(func(u, **kwargs))
    return theta
An example of one way to use the `Prior` objects below to go from unit cube to parameter space, for nested sampling. This takes and returns a list instead of an array, to accommodate possible vector parameters. Thus one will need something like ``theta_array=np.concatenate(*theta)`` :param unit_coords: Coordinates on the unit prior hyper-cube. Iterable. :param priors: A list of `Prior` objects, iterable of same length as `unit_coords`. :param prior_args: (optional) A list of dictionaries of prior function keyword arguments. :returns theta: A list of parameter values corresponding to the given coordinates on the prior unit hypercube.
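A hedged sketch of the call pattern, using a toy stand-in for a `Prior` object (a real `Prior` from the library would provide `unit_transform` itself) and a hypothetical `model` instance exposing the method above.

class UniformPrior:
    # toy prior: unit_transform maps u in [0, 1] onto [lo, hi]
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def unit_transform(self, u, **kwargs):
        return self.lo + u * (self.hi - self.lo)

priors = [UniformPrior(0.0, 10.0), UniformPrior(-1.0, 1.0)]
theta = model.prior_transform([0.5, 0.25], priors)
# theta == [5.0, -0.5]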
20,927
def getPixels(self):
    array = self.toArray()
    (width, height, depth) = array.size
    for x in range(width):
        for y in range(height):
            yield Pixel(array, x, y)
Return a stream of pixels from current Canvas.
20,928
def on_error(e): exname = {: , : } sys.stderr.write(.format(exname[e.__class__.__name__], str(e))) sys.stderr.write() sys.exit(1)
Error handler RuntimeError or ValueError exceptions raised by commands will be handled by this function.
20,929
def get_config(self): config = { : self.location, : self.language, : self.topic, } return config
function to get current configuration
20,930
def _machinectl(cmd, output_loglevel=, ignore_retcode=False, use_vt=False): prefix = return __salt__[](.format(prefix, cmd), output_loglevel=output_loglevel, ignore_retcode=ignore_retcode, use_vt=use_vt)
Helper function to run machinectl
20,931
def invoke_function(self, script_hash, operation, params, **kwargs): contract_params = encode_invocation_params(params) raw_result = self._call( JSONRPCMethods.INVOKE_FUNCTION.value, [script_hash, operation, contract_params, ], **kwargs) return decode_invocation_result(raw_result)
Invokes a contract's function with given parameters and returns the result. :param script_hash: contract script hash :param operation: name of the operation to invoke :param params: list of parameters to be passed in to the smart contract :type script_hash: str :type operation: str :type params: list :return: result of the invocation :rtype: dictionary
20,932
def _get_ned_sources_needing_metadata( self): self.log.debug( ) tableName = self.dbTableName sqlQuery = u % locals() rows = readquery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, quiet=False ) self.theseIds = [] self.theseIds[:] = [r["ned_name"] for r in rows] self.log.debug( ) return len(self.theseIds)
*Get the names of 50000 or fewer NED sources that still require metadata in the database* **Return:** - ``len(self.theseIds)`` -- the number of NED IDs returned *Usage:* .. code-block:: python numberSources = stream._get_ned_sources_needing_metadata()
20,933
def _all_feature_values(
        self,
        column,
        feature,
        distinct=True,
        contig=None,
        strand=None):
    return self.db.query_feature_values(
        column=column,
        feature=feature,
        distinct=distinct,
        contig=contig,
        strand=strand)
Cached lookup of all values for a particular feature property from the database, caches repeated queries in memory and stores them as a CSV. Parameters ---------- column : str Name of property (e.g. exon_id) feature : str Type of entry (e.g. exon) distinct : bool, optional Keep only unique values contig : str, optional Restrict query to particular contig strand : str, optional Restrict results to "+" or "-" strands Returns a list constructed from query results.
20,934
def create_record(self, rtype=None, name=None, content=None, **kwargs): if not rtype and kwargs.get(): warnings.warn(, DeprecationWarning) rtype = kwargs.get() return self._create_record(rtype, name, content)
Create record. If record already exists with the same content, do nothing.
20,935
def get_value(self, entry_name: Text, entry_lines: Sequence[Text]) -> Optional[Text]:
    for line in entry_lines:
        match = self._regex.match(line)
        if match:
            return match.group(1)
    return None
See base class method.
20,936
def put(
    self, item: _T, timeout: Union[float, datetime.timedelta] = None
) -> "Future[None]":
    future = Future()
    try:
        self.put_nowait(item)
    except QueueFull:
        self._putters.append((item, future))
        _set_timeout(future, timeout)
    else:
        future.set_result(None)
    return future
Put an item into the queue, perhaps waiting until there is room. Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. ``timeout`` may be a number denoting a time (on the same scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time.
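A short usage sketch with Tornado's queue API: the returned future is awaited inside a coroutine, and a full queue plus a timeout raises tornado.util.TimeoutError as described above.

from datetime import timedelta

from tornado.ioloop import IOLoop
from tornado.queues import Queue
from tornado.util import TimeoutError

async def main():
    q = Queue(maxsize=1)
    await q.put("first")  # room available, resolves immediately
    try:
        await q.put("second", timeout=timedelta(seconds=0.1))  # queue is full
    except TimeoutError:
        print("put timed out while the queue stayed full")

IOLoop.current().run_sync(main)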
20,937
def delete(self, name):
    if self.service.splunk_version >= (5,):
        Collection.delete(self, name)
    else:
        raise IllegalOperationException("Deleting indexes via the REST API is "
                                        "not supported before Splunk version 5.")
Deletes a given index. **Note**: This method is only supported in Splunk 5.0 and later. :param name: The name of the index to delete. :type name: ``string``
20,938
def all(self): identifiers = [] query = text() for result in self.execute(query): vid, type_, name = result res = IdentifierSearchResult( score=1, vid=vid, type=type_, name=name) identifiers.append(res) return identifiers
Returns list with all indexed identifiers.
20,939
def sanitize(self): super(InfoMessage, self).sanitize() if not isinstance(self.is_reply, bool): raise ValueError() if len(bytes(self.nonce)) != 8: raise ValueError() if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96, KEY_ID_HMAC_SHA_256_128): raise ValueError() if not isinstance(self.authentication_data, bytes): raise ValueError() if not isinstance(self.ttl, numbers.Integral) \ or self.ttl < 0 or self.ttl > 0xffffffff: raise ValueError() if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)): raise ValueError() if self.is_reply: if not isinstance(self.reply, LCAFNATTraversalAddress): raise ValueError("An InfoMessage which is an Info-Reply must contain an LCAFNATTraversalAddress") else: if self.reply is not None: raise ValueError("An InfoMessage which is an Info-Request can not contain a reply")
Check if the current settings conform to the LISP specifications and fix them where possible.
20,940
def en_disable_breakpoint_by_number(self, bpnum, do_enable=True): "Enable or disable a breakpoint given its breakpoint number." success, msg, bp = self.get_breakpoint(bpnum) if not success: return success, msg if do_enable: endis = else: endis = pass if bp.enabled == do_enable: return (False, ( % (str(bpnum), endis,))) bp.enabled = do_enable return (True, )
Enable or disable a breakpoint given its breakpoint number.
20,941
def proba2labels(proba: [list, np.ndarray],
                 confident_threshold: float,
                 classes: [list, np.ndarray]) -> List[List]:
    y = []
    for sample in proba:
        to_add = np.where(sample > confident_threshold)[0]
        if len(to_add) > 0:
            y.append(np.array(classes)[to_add].tolist())
        else:
            y.append(np.array([np.array(classes)[np.argmax(sample)]]).tolist())
    return y
Convert vectors of probabilities to labels using confident threshold (if probability to belong with the class is bigger than confident_threshold, sample belongs with the class; if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability) Args: proba: list of samples where each sample is a vector of probabilities to belong with given classes confident_threshold (float): boundary of probability to belong with a class classes: array of classes' names Returns: list of lists of labels for each sample
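A quick illustration with made-up probabilities and class names:

import numpy as np

proba = np.array([[0.1, 0.8, 0.6],   # two classes exceed the threshold
                  [0.2, 0.3, 0.4]])  # none exceeds it, so argmax wins
classes = ["neg", "neu", "pos"]
print(proba2labels(proba, confident_threshold=0.5, classes=classes))
# [['neu', 'pos'], ['pos']]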
20,942
def _get_migration_files(self, path):
    files = glob.glob(os.path.join(path, "[0-9]*_*.py"))
    if not files:
        return []
    files = list(map(lambda f: os.path.basename(f).replace(".py", ""), files))
    files = sorted(files)
    return files
Get all of the migration files in a given path. :type path: str :rtype: list
20,943
def get_status(self):
    logger.debug("get server status")
    server_status = ctypes.c_int()
    cpu_status = ctypes.c_int()
    clients_count = ctypes.c_int()
    error = self.library.Srv_GetStatus(self.pointer, ctypes.byref(server_status),
                                       ctypes.byref(cpu_status),
                                       ctypes.byref(clients_count))
    check_error(error)
    logger.debug("status server %s cpu %s clients %s" %
                 (server_status.value, cpu_status.value, clients_count.value))
    return snap7.snap7types.server_statuses[server_status.value], \
        snap7.snap7types.cpu_statuses[cpu_status.value], \
        clients_count.value
Reads the server status, the Virtual CPU status and the number of the clients connected. :returns: server status, cpu status, client count
20,944
def transferReporter(self, xferId, message): if self.is_stopped(): return True _asp_message = AsperaMessage(message) if not _asp_message.is_msg_type( [enumAsperaMsgType.INIT, enumAsperaMsgType.DONE, enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR, enumAsperaMsgType.STATS]): return _session_id = _asp_message.get_session_id() _msg = self.debug_id(xferId, _session_id) + " : " + _asp_message._msg_type logger.info(_msg) with self._session_lock: if _asp_message.is_msg_type([enumAsperaMsgType.INIT]): assert(_session_id not in self._sessions) _session = AsperaSession(_session_id) self._sessions[_session_id] = _session self.notify_init() else: _session = self._sessions[_session_id] if _asp_message.is_msg_type([enumAsperaMsgType.DONE]): if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()): self.notify_progress() _session.set_success() self.notify_done() elif _asp_message.is_msg_type([enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR]): _session.set_error(_asp_message.get_error_descr()) self.notify_done(error=True) elif _asp_message.is_msg_type([enumAsperaMsgType.STATS]): if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()): self.notify_progress()
the callback method used by the Aspera sdk during transfer to notify progress, error or successful completion
20,945
def decide_child_program(args_executable, args_child_program): if os.path.sep in args_child_program: logger.error( "The parameter to --exec must be a file name (to be found " "inside venv%s' not found. If you want to run an executable " "file from a library installed in the virtualenv " "check the `--exec` option in the help.", args_child_program) raise FadesError("child program not found.") analyzable_child_program = args_child_program child_program = args_child_program else: analyzable_child_program = None child_program = None return analyzable_child_program, child_program
Decide what the child program really is (if any).
20,946
def set_filter(self, slices, values):
    self.filters = [[sl, values[sl]] for sl in slices]
Sets Fourier-space filters for the image. The image is filtered by subtracting values from the image at slices. Parameters ---------- slices : List of indices or slice objects. The q-values in Fourier space to filter. values : np.ndarray The complete array of Fourier space peaks to subtract off. values should be the same size as the FFT of the image; only the portions of values at slices will be removed. Examples -------- To remove a two Fourier peaks in the data at q=(10, 10, 10) & (245, 245, 245), where im is the residuals of a model: * slices = [(10,10,10), (245, 245, 245)] * values = np.fft.fftn(im) * im.set_filter(slices, values)
20,947
def getAsKml(self, session): statement = .format(self.geometryColumnName, self.tableName, self.id) result = session.execute(statement) for row in result: return row.kml
Retrieve the geometry in KML format. This method is a veneer for an SQL query that calls the ``ST_AsKml()`` function on the geometry column. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: str: KML string representation of geometry.
20,948
def padded_sequence_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): if common_layers.shape_list(predictions)[-1] == 1: return rounding_sequence_accuracy( predictions, labels, weights_fn=weights_fn) with tf.variable_scope( "padded_sequence_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) predictions_shape = common_layers.shape_list(padded_predictions) batch_size = predictions_shape[0] num_classes = predictions_shape[-1] flat_size = common_layers.list_product( common_layers.shape_list(padded_labels)[1:]) padded_predictions = tf.reshape( padded_predictions, [batch_size, common_layers.list_product(predictions_shape[1:-1]), num_classes]) padded_labels = tf.reshape(padded_labels, [batch_size, flat_size]) weights = tf.reshape(weights, [batch_size, flat_size]) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights axis = list(range(1, len(outputs.get_shape()))) correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) return correct_seq, tf.constant(1.0)
Percentage of times that predictions matches labels everywhere (non-0).
20,949
def ge(self, value): self.op = self.negate_op = self.value = self._value(value) return self
Construct a greater than or equal to (``>=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field
20,950
def response_cookies(self):
    try:
        ret = {}
        for cookie_base_uris in self.response.cookies._cookies.values():
            for cookies in cookie_base_uris.values():
                for cookie in cookies.keys():
                    ret[cookie] = cookies[cookie].value
        return ret
    except Exception as e:
        self.error = ApiError(
            "Exception in making Request with:: %s\n%s" % (
                e, traceback.format_exc()))
        raise Exception(self.error)
This will return all cookies set :return: dict {name, value}
20,951
def arg(self, state, index, stack_base=None):
    session = self.arg_session
    if self.args is None:
        arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1]
    else:
        arg_loc = self.args[index]
    return arg_loc.get_value(state, stack_base=stack_base)
Returns a bitvector expression representing the nth argument of a function. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless you've customized this CC.
20,952
def pretty_str(label, arr): def is_col(a): try: return a.shape[0] > 1 and a.shape[1] == 1 except (AttributeError, IndexError): return False if label is None: label = if label: label += if is_col(arr): return label + str(arr.T).replace(, ) + rows = str(arr).split() if not rows: return s = label + rows[0] pad = * len(label) for line in rows[1:]: s = s + + pad + line return s
Generates a pretty printed NumPy array with an assignment. Optionally transposes column vectors so they are drawn on one line. Strictly speaking arr can be any time convertible by `str(arr)`, but the output may not be what you want if the type of the variable is not a scalar or an ndarray. Examples -------- >>> pprint('cov', np.array([[4., .1], [.1, 5]])) cov = [[4. 0.1] [0.1 5. ]] >>> print(pretty_str('x', np.array([[1], [2], [3]]))) x = [[1 2 3]].T
20,953
def prettify_json(json_string): try: data = json.loads(json_string) html = + json.dumps(data, sort_keys=True, indent=4) + except: html = json_string return mark_safe(html)
Given a JSON string, it returns it as a safe formatted HTML
20,954
def break_around_binary_operator(logical_line, tokens): r def is_binary_operator(token_type, text): return ((token_type == tokenize.OP or text in [, ]) and text not in "()[]{},:.;@=%~") line_break = False unary_context = True previous_token_type = None previous_text = None for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: continue if ( in text or in text) and token_type != tokenize.STRING: line_break = True else: if (is_binary_operator(token_type, text) and line_break and not unary_context and not is_binary_operator(previous_token_type, previous_text)): yield start, "W503 line break before binary operator" unary_context = text in line_break = False previous_token_type = token_type previous_text = text
r""" Avoid breaks before binary operators. The preferred place to break around a binary operator is after the operator, not before it. W503: (width == 0\n + height == 0) W503: (width == 0\n and height == 0) Okay: (width == 0 +\n height == 0) Okay: foo(\n -x) Okay: foo(x\n []) Okay: x = '''\n''' + '' Okay: foo(x,\n -y) Okay: foo(x, # comment\n -y) Okay: var = (1 &\n ~2) Okay: var = (1 /\n -2) Okay: var = (1 +\n -1 +\n -2)
20,955
def dpi(self):
    def int_dpi(dpi):
        try:
            int_dpi = int(round(float(dpi)))
            if int_dpi < 1 or int_dpi > 2048:
                int_dpi = 72
        except (TypeError, ValueError):
            int_dpi = 72
        return int_dpi

    def normalize_pil_dpi(pil_dpi):
        if isinstance(pil_dpi, tuple):
            return (int_dpi(pil_dpi[0]), int_dpi(pil_dpi[1]))
        return (72, 72)

    return normalize_pil_dpi(self._pil_props[2])
A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch resolution of this image. A default value of (72, 72) is used if the dpi is not specified in the image file.
20,956
def list_build_configurations_for_product_version(product_id, version_id,
                                                  page_size=200, page_index=0,
                                                  sort="", q=""):
    data = list_build_configurations_for_project_raw(product_id, version_id,
                                                     page_size, page_index, sort, q)
    if data:
        return utils.format_json_list(data)
List all BuildConfigurations associated with the given ProductVersion
20,957
def _add_point_scalar(self, scalars, name, set_active=False, deep=True): if not isinstance(scalars, np.ndarray): raise TypeError() if scalars.shape[0] != self.n_points: raise Exception( + ) if scalars.dtype == np.bool: scalars = scalars.view(np.uint8) if name not in self._point_bool_array_names: self._point_bool_array_names.append(name) if not scalars.flags.c_contiguous: scalars = np.ascontiguousarray(scalars) vtkarr = numpy_to_vtk(scalars, deep=deep) vtkarr.SetName(name) self.GetPointData().AddArray(vtkarr) if set_active or self.active_scalar_info[1] is None: self.GetPointData().SetActiveScalars(name) self._active_scalar_info = [POINT_DATA_FIELD, name]
Adds point scalars to the mesh Parameters ---------- scalars : numpy.ndarray Numpy array of scalars. Must match number of points. name : str Name of point scalars to add. set_active : bool, optional Sets the scalars to the active plotting scalars. Default False. deep : bool, optional Does not copy scalars when False. A reference to the scalars must be kept to avoid a segfault.
20,958
def _update_dict(data, default_data, replace_data=False): if not data: data = default_data.copy() return data if not isinstance(data, dict): raise TypeError() if len(data) > 255: raise ValueError() for i in data.keys(): if not isinstance(i, int): raise TypeError() if i < 0 or i > 255: raise ValueError() if not replace_data: data.update(default_data) return data
Update algorithm definition type dictionaries
20,959
def shot_chart_jointgrid(x, y, data=None, joint_type="scatter", title="", joint_color="b", cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", court_lw=1, outer_lines=False, flip_court=False, joint_kde_shade=True, gridsize=None, marginals_color="b", marginals_type="both", marginals_kde_shade=True, size=(12, 11), space=0, despine=False, joint_kws=None, marginal_kws=None, **kwargs): if joint_kws is None: joint_kws = {} joint_kws.update(kwargs) if marginal_kws is None: marginal_kws = {} if cmap is None: cmap = sns.light_palette(joint_color, as_cmap=True) if flip_court: xlim = xlim[::-1] ylim = ylim[::-1] grid = sns.JointGrid(x=x, y=y, data=data, xlim=xlim, ylim=ylim, space=space) if joint_type == "scatter": grid = grid.plot_joint(plt.scatter, color=joint_color, **joint_kws) elif joint_type == "kde": grid = grid.plot_joint(sns.kdeplot, cmap=cmap, shade=joint_kde_shade, **joint_kws) elif joint_type == "hex": if gridsize is None: from seaborn.distributions import _freedman_diaconis_bins x_bin = _freedman_diaconis_bins(x) y_bin = _freedman_diaconis_bins(y) gridsize = int(np.mean([x_bin, y_bin])) grid = grid.plot_joint(plt.hexbin, gridsize=gridsize, cmap=cmap, **joint_kws) else: raise ValueError("joint_type must be , , or .") if marginals_type == "both": grid = grid.plot_marginals(sns.distplot, color=marginals_color, **marginal_kws) elif marginals_type == "hist": grid = grid.plot_marginals(sns.distplot, color=marginals_color, kde=False, **marginal_kws) elif marginals_type == "kde": grid = grid.plot_marginals(sns.kdeplot, color=marginals_color, shade=marginals_kde_shade, **marginal_kws) else: raise ValueError("marginals_type must be , , or .") grid.fig.set_size_inches(size) ax = grid.fig.get_axes()[0] draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines) grid.set_axis_labels(xlabel="", ylabel="") ax.tick_params(labelbottom="off", labelleft="off") ax.set_title(title, y=1.2, fontsize=18) for spine in ax.spines: ax.spines[spine].set_lw(court_lw) ax.spines[spine].set_color(court_color) grid.ax_marg_x.spines[spine].set_lw(court_lw) grid.ax_marg_x.spines[spine].set_color(court_color) grid.ax_marg_y.spines[spine].set_lw(court_lw) grid.ax_marg_y.spines[spine].set_color(court_color) if despine: ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) return grid
Returns a JointGrid object containing the shot chart. This function allows for more flexibility in customizing your shot chart than the ``shot_chart_jointplot`` function. Parameters ---------- x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as columns from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. joint_type : { "scatter", "kde", "hex" }, optional The type of shot chart for the joint plot. title : str, optional The title for the plot. joint_color : matplotlib color, optional Color used to plot the shots on the joint plot. cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the value passed to ``color``. Used for KDE and Hexbin joint plots. {x, y}lim : two-tuples, optional The axis limits of the plot. The defaults represent the out of bounds lines and half court line. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth the of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. joint_kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours on the joint plot. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. marginals_color : matplotlib color, optional Color used to plot the shots on the marginal plots. marginals_type : { "both", "hist", "kde"}, optional The type of plot for the marginal plots. marginals_kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours on the marginal plots. size : tuple, optional The width and height of the plot in inches. space : numeric, optional The space between the joint and marginal plots. despine : boolean, optional If ``True``, removes the spines. {joint, marginal}_kws : dicts Additional kewyord arguments for joint and marginal plot components. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- grid : JointGrid The JointGrid object with the shot chart plotted on it.
20,960
def set_scene_config(self, scene_id, config):
    if not scene_id in self.state.scenes:
        err_msg = "Requested to reconfigure scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    if scene_id == self.state.activeSceneId:
        pass
    self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(config=config)
    sequence_number = self.zmq_publisher.publish_scene_config(scene_id, config)
    logging.debug("Reconfigured scene {sceneNum}".format(sceneNum=scene_id))
    return (True, sequence_number, "OK")
reconfigure a scene by scene ID
20,961
def events(self): if self._events is None: self._events = EventList(self._version, workspace_sid=self._solution[], ) return self._events
Access the events :returns: twilio.rest.taskrouter.v1.workspace.event.EventList :rtype: twilio.rest.taskrouter.v1.workspace.event.EventList
20,962
def LineWrap(text, omit_sgr=False): def _SplitWithSgr(text_line): token_list = sgr_re.split(text_line) text_line_list = [] line_length = 0 for (index, token) in enumerate(token_list): if token is : continue if sgr_re.match(token): text_line_list.append(token) text_line = .join(token_list[index +1:]) else: if line_length + len(token) <= width: text_line_list.append(token) line_length += len(token) text_line = .join(token_list[index +1:]) else: text_line_list.append(token[:width - line_length]) text_line = token[width - line_length:] text_line += .join(token_list[index +1:]) break return (.join(text_line_list), text_line)
Break line to fit screen width, factoring in ANSI/SGR escape sequences. Args: text: String to line wrap. omit_sgr: Bool, to omit counting ANSI/SGR sequences in the length. Returns: Text with additional line wraps inserted for lines grater than the width.
20,963
def add_entry(self, src, dst, duration=3600, src_port1=None, src_port2=None, src_proto=, dst_port1=None, dst_port2=None, dst_proto=): self.entries.setdefault(, []).append(prepare_blacklist( src, dst, duration, src_port1, src_port2, src_proto, dst_port1, dst_port2, dst_proto))
Create a blacklist entry. A blacklist can be added directly from the engine node, or from the system context. If submitting from the system context, it becomes a global blacklist. This will return the properly formatted json to submit. :param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any' :param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any' :param int duration: length of time to blacklist Both the system and engine context blacklist allow kw to be passed to provide additional functionality such as adding source and destination ports or port ranges and specifying the protocol. The following parameters define the ``kw`` that can be passed. The following example shows creating an engine context blacklist using additional kw:: engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600, src_port1=1000, src_port2=1500, src_proto='predefined_udp', dst_port1=3, dst_port2=3000, dst_proto='predefined_udp') :param int src_port1: start source port to limit blacklist :param int src_port2: end source port to limit blacklist :param str src_proto: source protocol. Either 'predefined_tcp' or 'predefined_udp'. (default: 'predefined_tcp') :param int dst_port1: start dst port to limit blacklist :param int dst_port2: end dst port to limit blacklist :param str dst_proto: dst protocol. Either 'predefined_tcp' or 'predefined_udp'. (default: 'predefined_tcp') .. note:: if blocking a range of ports, use both src_port1 and src_port2, otherwise providing only src_port1 is adequate. The same applies to dst_port1 / dst_port2. In addition, if you provide src_portX but not dst_portX (or vice versa), the undefined port side definition will default to all ports.
20,964
def repr_def_class(self, class_data):
    classname = self.formatted_classname(class_data["classname"])
    if classname not in self.classes:
        self.lines.append("")
        self.lines.append("class %s(%s):" % (classname, self.basename))
        kwargs = list()
        setattr_arguments = list()
        for attr in self._classes[classname]:
            kwargs.append("%s=None" % attr)
            setattr_arguments.append(
                self.Tab2 + "self.%s = %s" % (attr, attr))
        if len(kwargs):
            line = self.Tab + "def __init__(self, %s):" % ", ".join(kwargs)
        else:
            line = self.Tab + "def __init__(self):"
        self.lines.append(line)
        for setattr_argument in setattr_arguments:
            self.lines.append(setattr_argument)
        if len(setattr_arguments):
            self.lines.append("")
        self.classes.add(classname)
Create code like this:: class Person(Base): def __init__(self, person_id=None, name=None): self.person_id = person_id self.name = name
20,965
def addPolylineAnnot(self, points):
    CheckParent(self)
    val = _fitz.Page_addPolylineAnnot(self, points)
    if not val:
        return
    val.thisown = True
    val.parent = weakref.proxy(self)
    self._annot_refs[id(val)] = val
    return val
Add a 'Polyline' annotation for a sequence of points.
20,966
def visit_Call(self, node):
    self.generic_visit(node)
    f = node.func
    if isinstance(f, ast.Attribute) and f.attr == "partial":
        return self.add(node, {node})
    else:
        return_alias = self.call_return_alias(node)
        all_aliases = set()
        for value in return_alias:
            if isinstance(value, (ContainerOf, ast.FunctionDef, Intrinsic)):
                all_aliases.add(value)
            elif value in self.result:
                all_aliases.update(self.result[value])
            else:
                try:
                    ap = Aliases.access_path(value)
                    all_aliases.update(self.aliases.get(ap, ()))
                except NotImplementedError:
                    all_aliases.add(value)
        return self.add(node, all_aliases)
Resulting node alias to the return_alias of called function, if the function is already known by Pythran (i.e. it's an Intrinsic) or if Pythran already computed it's ``return_alias`` behavior. >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> fun = """ ... def f(a): return a ... def foo(b): c = f(b)""" >>> module = ast.parse(fun) The ``f`` function create aliasing between the returned value and its first argument. >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f(b) => ['b'] This also works with intrinsics, e.g ``dict.setdefault`` which may create alias between its third argument and the return value. >>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)' >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) __builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a'] Note that complex cases can arise, when one of the formal parameter is already known to alias to various values: >>> fun = """ ... def f(a, b): return a and b ... def foo(A, B, C, D): return f(A or B, C or D)""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
20,967
def plot_mv_grid_topology(self, technologies=False, **kwargs): if self.network.pypsa is None: try: timesteps = self.network.timeseries.timeindex self.network.pypsa = pypsa_io.to_pypsa( self.network, mode=None, timesteps=timesteps) except: logging.warning( "pypsa representation of MV grid needed to plot MV " "grid topology.") if self.network.pypsa is not None: plots.mv_grid_topology( self.network.pypsa, self.network.config, node_color= if technologies is True else None, filename=kwargs.get(, None), grid_district_geom=kwargs.get(, True), background_map=kwargs.get(, True), xlim=kwargs.get(, None), ylim=kwargs.get(, None), title=kwargs.get(, ))
Plots plain MV grid topology and optionally nodes by technology type (e.g. station or generator). Parameters ---------- technologies : :obj:`Boolean` If True plots stations, generators, etc. in the grid in different colors. If False does not plot any nodes. Default: False. For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
20,968
def merge_las(*las_files): if len(las_files) == 1: las_files = las_files[0] if not las_files: raise ValueError("No files to merge") if not utils.files_have_same_dtype(las_files): raise ValueError("All files must have the same point format") header = las_files[0].header num_pts_merged = sum(len(las.points) for las in las_files) merged = create_from_header(header) for dim_name, dim_type in las_files[0].points_data.point_format.extra_dims: merged.add_extra_dim(dim_name, dim_type) merged.points = np.zeros(num_pts_merged, merged.points.dtype) merged_x = np.zeros(num_pts_merged, np.float64) merged_y = np.zeros(num_pts_merged, np.float64) merged_z = np.zeros(num_pts_merged, np.float64) offset = 0 for i, las in enumerate(las_files, start=1): slc = slice(offset, offset + len(las.points)) merged.points[slc] = las.points merged_x[slc] = las.x merged_y[slc] = las.y merged_z[slc] = las.z merged[][slc] = i offset += len(las.points) merged.x = merged_x merged.y = merged_y merged.z = merged_z return merged
Merges multiple las files into one merged = merge_las(las_1, las_2) merged = merge_las([las_1, las_2, las_3]) Parameters ---------- las_files: Iterable of LasData or LasData Returns ------- pylas.lasdatas.base.LasBase The result of the merging
20,969
def edit_profile(): if g.user is None: abort(401) form = dict(name=g.user.name, email=g.user.email) if request.method == : if in request.form: User.get_collection().remove(g.user) session[] = None flash(u) return redirect(url_for()) form[] = request.form[] form[] = request.form[] if not form[]: flash(u) elif not in form[]: flash(u) else: flash(u) g.user.name = form[] g.user.email = form[] uid = User.get_collection().save(g.user) return redirect(url_for()) return render_template(, form=form)
Updates a profile
20,970
def modify_replication_instance(ReplicationInstanceArn=None, AllocatedStorage=None, ApplyImmediately=None, ReplicationInstanceClass=None, VpcSecurityGroupIds=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AllowMajorVersionUpgrade=None, AutoMinorVersionUpgrade=None, ReplicationInstanceIdentifier=None): pass
Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request. Some settings are applied during the maintenance window. See also: AWS API Documentation :example: response = client.modify_replication_instance( ReplicationInstanceArn='string', AllocatedStorage=123, ApplyImmediately=True|False, ReplicationInstanceClass='string', VpcSecurityGroupIds=[ 'string', ], PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AllowMajorVersionUpgrade=True|False, AutoMinorVersionUpgrade=True|False, ReplicationInstanceIdentifier='string' ) :type ReplicationInstanceArn: string :param ReplicationInstanceArn: [REQUIRED] The Amazon Resource Name (ARN) of the replication instance. :type AllocatedStorage: integer :param AllocatedStorage: The amount of storage (in gigabytes) to be allocated for the replication instance. :type ApplyImmediately: boolean :param ApplyImmediately: Indicates whether the changes should be applied immediately or during the next maintenance window. :type ReplicationInstanceClass: string :param ReplicationInstanceClass: The compute and memory capacity of the replication instance. Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance. (string) -- :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type MultiAZ: boolean :param MultiAZ: Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true . :type EngineVersion: string :param EngineVersion: The engine version number of the replication instance. :type AllowMajorVersionUpgrade: boolean :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the replication instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and AWS DMS has enabled auto patching for that engine version. 
:type ReplicationInstanceIdentifier: string :param ReplicationInstanceIdentifier: The replication instance identifier. This parameter is stored as a lowercase string. :rtype: dict :return: { 'ReplicationInstance': { 'ReplicationInstanceIdentifier': 'string', 'ReplicationInstanceClass': 'string', 'ReplicationInstanceStatus': 'string', 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'AvailabilityZone': 'string', 'ReplicationSubnetGroup': { 'ReplicationSubnetGroupIdentifier': 'string', 'ReplicationSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ] }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'ReplicationInstanceClass': 'string', 'AllocatedStorage': 123, 'MultiAZ': True|False, 'EngineVersion': 'string' }, 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'KmsKeyId': 'string', 'ReplicationInstanceArn': 'string', 'ReplicationInstancePublicIpAddress': 'string', 'ReplicationInstancePrivateIpAddress': 'string', 'ReplicationInstancePublicIpAddresses': [ 'string', ], 'ReplicationInstancePrivateIpAddresses': [ 'string', ], 'PubliclyAccessible': True|False, 'SecondaryAvailabilityZone': 'string' } } :returns: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
20,971
def newton_iterate(evaluate_fn, s, t):
    norm_update_prev = None
    norm_update = None
    linear_updates = 0
    current_s = s
    current_t = t
    for index in six.moves.xrange(MAX_NEWTON_ITERATIONS):
        jacobian, func_val = evaluate_fn(current_s, current_t)
        if jacobian is None:
            return True, current_s, current_t
        singular, delta_s, delta_t = _helpers.solve2x2(
            jacobian, func_val[:, 0]
        )
        if singular:
            break
        norm_update_prev = norm_update
        norm_update = np.linalg.norm([delta_s, delta_t], ord=2)
        if index > 0 and norm_update > 0.25 * norm_update_prev:
            linear_updates += 1
        if index >= 4 and 3 * linear_updates >= 2 * index:
            break
        norm_soln = np.linalg.norm([current_s, current_t], ord=2)
        current_s -= delta_s
        current_t -= delta_t
        if norm_update < NEWTON_ERROR_RATIO * norm_soln:
            return True, current_s, current_t
    return False, current_s, current_t
r"""Perform a Newton iteration. In this function, we assume that :math:`s` and :math:`t` are nonzero, this makes convergence easier to detect since "relative error" at ``0.0`` is not a useful measure. There are several tolerance / threshold quantities used below: * :math:`10` (:attr:`MAX_NEWTON_ITERATIONS`) iterations will be done before "giving up". This is based on the assumption that we are already starting near a root, so quadratic convergence should terminate quickly. * :math:`\tau = \frac{1}{4}` is used as the boundary between linear and superlinear convergence. So if the current error :math:`\|p_{n + 1} - p_n\|` is not smaller than :math:`\tau` times the previous error :math:`\|p_n - p_{n - 1}\|`, then convergence is considered to be linear at that point. * :math:`\frac{2}{3}` of all iterations must be converging linearly for convergence to be stopped (and moved to the next regime). This will only be checked after 4 or more updates have occurred. * :math:`\tau = 2^{-42}` (:attr:`NEWTON_ERROR_RATIO`) is used to determine that an update is sufficiently small to stop iterating. So if the error :math:`\|p_{n + 1} - p_n\|` smaller than :math:`\tau` times size of the term being updated :math:`\|p_n\|`, then we exit with the "correct" answer. It is assumed that ``evaluate_fn`` will use a Jacobian return value of :data:`None` to indicate that :math:`F(s, t)` is exactly ``0.0``. We **assume** that if the function evaluates to exactly ``0.0``, then we are at a solution. It is possible however, that badly parameterized curves can evaluate to exactly ``0.0`` for inputs that are relatively far away from a solution (see issue #21). Args: evaluate_fn (Callable[Tuple[float, float], tuple]): A callable which takes :math:`s` and :math:`t` and produces an evaluated function value and the Jacobian matrix. s (float): The (first) parameter where the iteration will start. t (float): The (second) parameter where the iteration will start. Returns: Tuple[bool, float, float]: The triple of * Flag indicating if the iteration converged. * The current :math:`s` value when the iteration stopped. * The current :math:`t` value when the iteration stopped.
20,972
def node_from_xml(xmlfile, nodefactory=Node):
    root = parse(xmlfile).getroot()
    return node_from_elem(root, nodefactory)
Convert a .xml file into a Node object. :param xmlfile: a file name or file object open for reading
20,973
def build(args):
    # The log/error message strings below were lost in extraction; the texts
    # used here are plausible reconstructions.
    if len(args) != 1:
        log.error('Exactly one build target must be given.')
        app.quit(1)
    target = address.new(args[0])
    log.info('Building target: %s', target)
    try:
        bb = Butcher()
        bb.clean()
        bb.load_graph(target)
        bb.build(target)
    except (gitrepo.GitError, error.BrokenGraph, error.NoSuchTargetError) as err:
        log.fatal(err)
        app.quit(1)
    except error.OverallBuildFailure as err:
        log.fatal(err)
        log.fatal('Failure log:')
        for e in bb.failure_log:
            log.fatal('%s: %s', e.node, e)
        app.quit(1)
Build a target and its dependencies.
20,974
def on_draw(self, e):
    gloo.clear()
    for visual in self.visuals:
        logger.log(5, "Draw visual `%s`.", visual)
        visual.on_draw()
Draw all visuals.
20,975
def multi(method):
    @functools.wraps(method)
    def multi(self, address=''):
        values = flask.request.values
        address = urllib.parse.unquote_plus(address)
        # The separator and message strings below were lost in extraction;
        # they are plausible reconstructions.
        if address and values and not address.endswith('.'):
            address += '.'
        result = {}
        for a in values or '':
            try:
                if not self.project:
                    raise ValueError('No project is loaded')
                ed = editor.Editor(address + a, self.project)
                result[address + a] = {'value': method(self, ed, a)}
            except:
                if self.project:
                    traceback.print_exc()
                result[address + a] = {'error': 'Could not access address %s' % a}
        return flask.jsonify(result)

    return multi
Decorator for RestServer methods that take multiple addresses
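A hedged sketch of how the decorator might be applied; the server class, the ``editor.Editor`` API and the ``ed.get()`` call are assumptions, not taken from the original project.

class RestServer:
    def __init__(self, project):
        self.project = project

    @multi
    def get_value(self, ed, address):
        # Receives an editor.Editor built from the base address plus one
        # address fragment from the request; returns a JSON-able value.
        return ed.get()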
20,976
def _parse_application_info(self, info_container):
    m = applications_regex.search(info_container.text)
    if m:
        self.open_applications = m.group(1) == "opened"
Parses the guild's application info. Parameters ---------- info_container: :class:`bs4.Tag` The parsed content of the information container.
20,977
def graph_from_dot_file(path):
    # The file mode string was lost in extraction; 'rb' is a plausible
    # reconstruction.
    fd = open(path, 'rb')
    data = fd.read()
    fd.close()
    return graph_from_dot_data(data)
Load graph as defined by a DOT file. The file is assumed to be in DOT format. It will be loaded, parsed and a Dot class will be returned, representing the graph.
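A short usage sketch, assuming a pydot-style ``Dot`` object is returned; the rendering call is an assumption.

graph = graph_from_dot_file("example.dot")
graph.write_png("example.png")  # render the parsed graph, if supported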
20,978
def days_at_time(days, t, tz, day_offset=0):
    days = pd.DatetimeIndex(days).tz_localize(None)

    if len(days) == 0:
        return days.tz_localize(UTC)

    delta = pd.Timedelta(
        days=day_offset,
        hours=t.hour,
        minutes=t.minute,
        seconds=t.second,
    )
    return (days + delta).tz_localize(tz).tz_convert(UTC)
Create an index of days at time ``t``, interpreted in timezone ``tz``. The returned index is localized to UTC. Parameters ---------- days : DatetimeIndex An index of dates (represented as midnight). t : datetime.time The time to apply as an offset to each day in ``days``. tz : pytz.timezone The timezone to use to interpret ``t``. day_offset : int The number of days we want to offset @days by Examples -------- In the example below, the times switch from 13:45 to 12:45 UTC because March 13th is the daylight savings transition for US/Eastern. All the times are still 8:45 when interpreted in US/Eastern. >>> import pandas as pd; import datetime; import pprint >>> dts = pd.date_range('2016-03-12', '2016-03-14') >>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern') >>> pprint.pprint([str(dt) for dt in dts_at_845]) ['2016-03-12 13:45:00+00:00', '2016-03-13 12:45:00+00:00', '2016-03-14 12:45:00+00:00']
20,979
def listMembers(self, id, headers=None, query_params=None, content_type="application/json"):
    uri = self.client.base_url + "/network/" + id + "/member"
    return self.client.get(uri, None, headers, query_params, content_type)
Get a list of network members. It is the method for GET /network/{id}/member
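A hedged usage sketch; the service object, network id, headers and query parameters are hypothetical.

response = network_service.listMembers(
    "8056c2e21c000001",
    headers={"Authorization": "Bearer <token>"},
    query_params={"limit": 50},
)
members = response.json()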
20,980
def _build_table(self) -> Dict[State, Tuple[Multiplex, ...]]:
    result: Dict[State, Tuple[Multiplex, ...]] = {}
    for state in self.influence_graph.all_states():
        result[state] = tuple(multiplex for multiplex in self.influence_graph.multiplexes
                              if multiplex.is_active(state))
    return result
Private method which builds the table mapping each State to its active multiplexes.
20,981
def pad_batch_dimension_for_multiple_chains(
        observed_time_series, model, chain_batch_shape):
    [observed_time_series,
     is_missing] = canonicalize_observed_time_series_with_mask(observed_time_series)

    event_ndims = 2  # event shape is [num_timesteps, observation_size]

    model_batch_ndims = (
        model.batch_shape.ndims if model.batch_shape.ndims is not None
        else tf.shape(input=model.batch_shape_tensor())[0])

    # The string arguments below were lost in extraction; plausible
    # reconstructions are used here.
    chain_batch_shape = tf.convert_to_tensor(
        value=chain_batch_shape, name='chain_batch_shape', dtype=tf.int32)
    if not chain_batch_shape.shape.is_fully_defined():
        raise ValueError('chain_batch_shape must have static rank: {}'.format(
            chain_batch_shape))
    if chain_batch_shape.shape.ndims == 0:  # promote a scalar shape to a vector
        chain_batch_shape = chain_batch_shape[tf.newaxis]
    chain_batch_ndims = tf.compat.dimension_value(chain_batch_shape.shape[0])

    def do_padding(observed_time_series_tensor):
        current_sample_shape = tf.shape(
            input=observed_time_series_tensor)[:-(model_batch_ndims + event_ndims)]
        current_batch_and_event_shape = tf.shape(
            input=observed_time_series_tensor)[-(model_batch_ndims + event_ndims):]
        return tf.reshape(
            tensor=observed_time_series_tensor,
            shape=tf.concat([
                current_sample_shape,
                tf.ones([chain_batch_ndims], dtype=tf.int32),
                current_batch_and_event_shape], axis=0))

    # Pad only if the observed series already carries sample dimensions.
    observed_time_series = prefer_static.cond(
        (dist_util.prefer_static_rank(observed_time_series) >
         model_batch_ndims + event_ndims),
        lambda: do_padding(observed_time_series),
        lambda: observed_time_series)

    if is_missing is not None:
        is_missing = prefer_static.cond(
            (dist_util.prefer_static_rank(is_missing) >
             model_batch_ndims + event_ndims),
            lambda: do_padding(is_missing),
            lambda: is_missing)
        return missing_values_util.MaskedTimeSeries(observed_time_series,
                                                    is_missing=is_missing)

    return observed_time_series
Expand the observed time series with extra batch dimension(s).
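To make the reshape concrete, here is a standalone sketch of the same padding idea; the shapes and names are illustrative and not taken from the library.

import tensorflow as tf

# 50 posterior samples of a series with 200 timesteps and 1 feature, for a
# model with empty batch shape (model_batch_ndims = 0, event_ndims = 2).
samples = tf.zeros([50, 200, 1])
chain_batch_ndims = 1  # e.g. chain_batch_shape = [4]

sample_shape = tf.shape(samples)[:-2]             # [50]
batch_and_event_shape = tf.shape(samples)[-2:]    # [200, 1]
padded = tf.reshape(
    samples,
    tf.concat([sample_shape,
               tf.ones([chain_batch_ndims], dtype=tf.int32),
               batch_and_event_shape], axis=0))
print(padded.shape)  # (50, 1, 200, 1): ready to broadcast against 4 chains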
20,982
def update_video(video_data):
    try:
        video = _get_video(video_data.get("edx_video_id"))
    except Video.DoesNotExist:
        error_message = u"Video not found when trying to update video with edx_video_id: {0}".format(
            video_data.get("edx_video_id"))
        raise ValVideoNotFoundError(error_message)

    serializer = VideoSerializer(video, data=video_data)
    if serializer.is_valid():
        serializer.save()
        return video_data.get("edx_video_id")
    else:
        raise ValCannotUpdateError(serializer.errors)
Called on to update Video objects in the database update_video is used to update Video objects by the given edx_video_id in the video_data. Args: video_data (dict): { url: api url to the video edx_video_id: ID of the video duration: Length of video in seconds client_video_id: client ID of video encoded_video: a list of EncodedVideo dicts url: url of the video file_size: size of the video in bytes profile: ID of the profile courses: Courses associated with this video } Raises: Raises ValVideoNotFoundError if the video cannot be retrieved. Raises ValCannotUpdateError if the video cannot be updated. Returns the successfully updated Video object
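A hedged usage sketch; the field values are hypothetical and the exact required fields depend on ``VideoSerializer``.

video_data = {
    "edx_video_id": "example-video-id",
    "client_video_id": "Lecture 1 recording",
    "duration": 111.0,
    "encoded_video": [],
    "courses": [],
}
try:
    edx_video_id = update_video(video_data)
except ValVideoNotFoundError:
    pass  # no Video with that edx_video_id exists yet
except ValCannotUpdateError as err:
    print(err)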
20,983
def build_save_containers(platforms, registry, load_cache) -> int:
    from joblib import Parallel, delayed
    if len(platforms) == 0:
        return 0

    platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")(
        delayed(_build_save_container)(platform, registry, load_cache)
        for platform in platforms)

    is_error = False
    for platform_result in platform_results:
        if platform_result is not None:
            # Log message text lost in extraction; a plausible reconstruction.
            logging.error('Failed to build platform: %s', platform_result)
            is_error = True
    return 1 if is_error else 0
Entry point to build and upload all built Docker images in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if an error occurred, 0 otherwise
20,984
def cancelPnL(self, account, modelCode: str = ''):
    key = (account, modelCode)
    reqId = self.wrapper.pnlKey2ReqId.pop(key, None)
    if reqId:
        self.client.cancelPnL(reqId)
        self.wrapper.pnls.pop(reqId, None)
    else:
        # Error message text lost in extraction; a plausible reconstruction.
        self._logger.error(
            f'cancelPnL: No subscription for account {account}, modelCode {modelCode}')
Cancel PnL subscription. Args: account: Cancel for this account. modelCode: If specified, cancel for this account model.
20,985
def complete_object_value(
    self,
    return_type: GraphQLObjectType,
    field_nodes: List[FieldNode],
    info: GraphQLResolveInfo,
    path: ResponsePath,
    result: Any,
) -> AwaitableOrValue[Dict[str, Any]]:
    if return_type.is_type_of:
        is_type_of = return_type.is_type_of(result, info)

        if isawaitable(is_type_of):
            async def collect_and_execute_subfields_async():
                if not await is_type_of:
                    raise invalid_return_type_error(return_type, result, field_nodes)
                return self.collect_and_execute_subfields(
                    return_type, field_nodes, path, result)

            return collect_and_execute_subfields_async()

        if not is_type_of:
            raise invalid_return_type_error(return_type, result, field_nodes)

    return self.collect_and_execute_subfields(return_type, field_nodes, path, result)
Complete an Object value by executing all sub-selections.
20,986
def _get_html_contents(html):
    parser = MyHTMLParser()
    parser.feed(html)
    # The tag strings below were lost in extraction; plausible reconstructions.
    if parser.is_code:
        return ('code', parser.data.strip())
    elif parser.is_math:
        return ('math', parser.data.strip())
    else:
        return '', ''
Process an HTML block and detect whether it is a code block, a math block, or a regular HTML block.
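An illustrative call, assuming ``MyHTMLParser`` flags ``<code>`` blocks; note the tag strings in the listing above are reconstructions.

kind, body = _get_html_contents("<pre><code>print('hi')</code></pre>")
# kind would be 'code' and body the stripped source text; a plain block such
# as "<p>hello</p>" would fall through to the empty ('', '') result.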
20,987
def remove_file(self, filepath):
    self._models.pop(filepath)
    # dict.pop takes its default positionally; `default=` is not a valid keyword.
    self._updates.pop(filepath, None)
    self.signalModelDestroyed.emit(filepath)
Removes the DataFrameModel from being registered. :param filepath: (str) The filepath to delete from the DataFrameModelManager. :return: None
20,988
def to_bool(value):
    bool_value = False
    # The accepted truthy strings were lost in extraction; 'true' and 'yes'
    # are plausible reconstructions.
    if str(value).lower() in ['true', 'yes']:
        bool_value = True
    return bool_value
Convert string value to bool.
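Example calls, assuming the reconstructed truthy strings above ('true', 'yes').

to_bool("True")    # True (case-insensitive)
to_bool("yes")     # True
to_bool("0")       # False
to_bool(None)      # False -- anything not in the list is falsy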
20,989
def _from_string(cls, serialized):
    parse = cls.URL_RE.match(serialized)
    if not parse:
        raise InvalidKeyError(cls, serialized)

    parse = parse.groupdict()
    # The key name was lost in extraction; 'definition_id' is a plausible
    # reconstruction for a DefinitionLocator.
    if parse['definition_id']:
        parse['definition_id'] = cls.as_object_id(parse['definition_id'])
    return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})
Return a DefinitionLocator parsing the given serialized string :param serialized: matches the string to
20,990
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    releasenotes = ''
    # The file names were lost in extraction; reconstructed from the variable
    # names and the docstring.
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES')
    if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file):
        with open(releasenotes_file) as releasenotes_fd:
            releasenotes = releasenotes_fd.read()
    else:
        releasenotes = api.get_releasenotes(
            repo_path=project_dir,
            bugtracker_url=bugtracker_url,
        )
    return releasenotes
Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: RuntimeError: If the release notes could not be retrieved
20,991
def mset_list(item, index, value):
    if isinstance(index, (int, slice)):
        item[index] = value
    else:
        # The original used map() for its side effect, which is a no-op on
        # Python 3 because map() is lazy; an explicit loop keeps the intent.
        for i, v in zip(index, value):
            item[i] = v
Set multiple items via an index of int, slice or list.
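A short example of the three accepted index kinds:

item = [0, 0, 0, 0]
mset_list(item, 1, 9)                  # int index       -> [0, 9, 0, 0]
mset_list(item, slice(2, 4), [7, 8])   # slice           -> [0, 9, 7, 8]
mset_list(item, [0, 3], [5, 6])        # list of indexes -> [5, 9, 7, 6]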
20,992
def addNoise(vecs, percent=0.1, n=2048):
    noisyVecs = []
    for vec in vecs:
        nv = vec.copy()
        for idx in vec:
            if numpy.random.random() <= percent:
                nv.discard(idx)
                nv.add(numpy.random.randint(n))
        noisyVecs.append(nv)
    return noisyVecs
Add noise to the given sequence of vectors and return the modified sequence. A percentage of the on bits are shuffled to other locations.
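A small illustrative run; the vectors are arbitrary sets of active-bit indices.

import numpy
vecs = [{3, 17, 200}, {5, 1024}]
noisy = addNoise(vecs, percent=0.5, n=2048)
# Each returned set keeps roughly the same number of on bits, with about half
# of the original indices moved to random positions in [0, 2048).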
20,993
def list_quota_volume(name):
    # The command and tag strings below were lost in extraction; plausible
    # reconstructions based on the gluster CLI are used here.
    cmd = 'volume quota {0}'.format(name)
    cmd += ' list'

    root = _gluster_xml(cmd)
    if not _gluster_ok(root):
        return None

    ret = {}
    for limit in _iter(root, 'limit'):
        path = limit.find('path').text
        ret[path] = _etree_to_dict(limit)
    return ret
List quotas of glusterfs volume name Name of the gluster volume CLI Example: .. code-block:: bash salt '*' glusterfs.list_quota_volume <volume>
20,994
def _map_content_types(archetype_tool, catalogs_definition):
    # Build a mapping from content type to the catalogs it should be indexed in.
    ct_map = {}
    to_reindex = []
    map_types = archetype_tool.catalog_map
    for catalog_id in catalogs_definition.keys():
        catalog_info = catalogs_definition.get(catalog_id, {})
        # The 'types' key is documented in the docstring below.
        types = catalog_info.get('types', [])
        for t in types:
            tmp_l = ct_map.get(t, [])
            tmp_l.append(catalog_id)
            ct_map[t] = tmp_l
    # Update the archetype tool only where the catalog set actually changed.
    for t in ct_map.keys():
        catalogs_list = ct_map[t]
        perv_catalogs_list = archetype_tool.catalog_map.get(t, [])
        set1 = set(catalogs_list)
        set2 = set(perv_catalogs_list)
        if set1 != set2:
            archetype_tool.setCatalogsByType(t, catalogs_list)
            to_reindex = to_reindex + list(set1 - set2) + list(set2 - set1)
    return to_reindex
Updates the mapping for content_types against catalogs :archetype_tool: an archetype_tool object :catalogs_definition: a dictionary like { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } }
20,995
def is_device_connected(self, ip):
    all_devices = self.get_all_connected_devices()
    for device in all_devices:
        # The dictionary keys were lost in extraction; 'ip' and 'active' are
        # plausible reconstructions.
        if ip == device['ip']:
            return device['active'] == 1
    return False
Check if a device identified by its IP is connected to the box :param ip: IP of the device you want to test :type ip: str :return: True if the device is connected, False if it's not :rtype: bool
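A hedged usage sketch; ``box`` stands for whatever client object exposes this method.

if box.is_device_connected("192.168.1.42"):
    print("device is online")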
20,996
def configure_room(self, form):
    if form.type == "cancel":
        return None
    elif form.type != "submit":
        raise ValueError("A form required to configure a room")

    iq = Iq(to_jid=self.room_jid.bare(), stanza_type="set")
    query = iq.new_query(MUC_OWNER_NS, "query")
    form.as_xml(query)
    self.manager.stream.set_response_handlers(
        iq, self.process_configuration_success, self.process_configuration_error)
    self.manager.stream.send(iq)
    return iq.get_id()
Configure the room using the provided data. Do nothing if the provided form is of type 'cancel'. :Parameters: - `form`: the configuration parameters. Should be a 'submit' form made by filling-in the configuration form retrieved using `self.request_configuration_form` or a 'cancel' form. :Types: - `form`: `Form` :return: id of the request stanza or `None` if a 'cancel' form was provided. :returntype: `unicode`
20,997
def insertOutputConfig(self, businput):
    if not ("app_name" in businput and "release_version" in businput
            and "pset_hash" in businput and "output_module_label" in businput
            and "global_tag" in businput):
        # The exception code (first argument) was lost in extraction; a
        # plausible reconstruction is used here.
        dbsExceptionHandler(
            'dbsException-invalid-input',
            "business/DBSOutputConfig/insertOutputConfig require: "
            "app_name, release_version, pset_hash, output_module_label and global_tag")

    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        # The key names were lost in extraction; reconstructed from the
        # corresponding .get() defaults.
        businput['scenario'] = businput.get("scenario", None)
        businput['pset_name'] = businput.get("pset_name", None)
        self.outmodin.execute(conn, businput, tran)
        tran.commit()
        tran = None
    except SQLAlchemyIntegrityError as ex:
        if str(ex).find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
            # The config already exists (TUC_OMC_1 unique constraint): ignore.
            if str(ex).find("TUC_OMC_1") != -1:
                pass
            else:
                try:
                    self.outmodin.execute(conn, businput, tran)
                    tran.commit()
                    tran = None
                except SQLAlchemyIntegrityError as ex1:
                    if str(ex1).find("unique constraint") != -1 and str(ex1).find("TUC_OMC_1") != -1:
                        pass
                except Exception as e1:
                    if tran:
                        tran.rollback()
                        tran = None
                    raise
        else:
            raise
    except Exception as e:
        if tran:
            tran.rollback()
        raise
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
Method to insert the Output Config. app_name, release_version, pset_hash, global_tag and output_module_label are required. args: businput(dic): input dictionary. Updated Oct 12, 2011
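A hedged usage sketch; the field values are made-up placeholders for the required keys, and the business-object name is hypothetical.

businput = {
    "app_name": "cmsRun",
    "release_version": "CMSSW_10_2_5",
    "pset_hash": "76e303993a1c2f842159dbfeeed9a0dd",
    "output_module_label": "RECO",
    "global_tag": "102X_dataRun2_v8",
}
output_config_business.insertOutputConfig(businput)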
20,998
def import_csv(file_name, **kwargs):
    # The keyword name was lost in extraction; the docstring documents it as
    # ``separator``.
    sep = kwargs.get('separator', ",")

    # Skip the first line, which contains the column headings.
    content = exch.read_file(file_name, skip_lines=1)
    return exch.import_text_data(content, sep)
Reads control points from a CSV file and generates a 1-dimensional list of control points. It is possible to use a different value separator via ``separator`` keyword argument. The following code segment illustrates the usage of ``separator`` keyword argument. .. code-block:: python :linenos: # By default, import_csv uses 'comma' as the value separator ctrlpts = exchange.import_csv("control_points.csv") # Alternatively, it is possible to import a file containing tab-separated values ctrlpts = exchange.import_csv("control_points.csv", separator="\\t") The only difference of this function from :py:func:`.exchange.import_txt()` is skipping the first line of the input file which generally contains the column headings. :param file_name: file name of the text file :type file_name: str :return: list of control points :rtype: list :raises GeomdlException: an error occurred reading the file
20,999
def points_from_x0y0x1y1(xyxy):
    x0 = xyxy[0]
    y0 = xyxy[1]
    x1 = xyxy[2]
    y1 = xyxy[3]
    return "%s,%s %s,%s %s,%s %s,%s" % (
        x0, y0,
        x1, y0,
        x1, y1,
        x0, y1
    )
Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1]
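For example:

points_from_x0y0x1y1([10, 20, 110, 80])
# -> '10,20 110,20 110,80 10,80'  (the four rectangle corners in order)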