Dataset schema: an unnamed int64 row index (values 0 to ~389k), a `code` column (strings of 26 to 79.6k characters), and a `docstring` column (strings of 1 to 46.9k characters).
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2): if depth < 1: raise ValueError("Depth cannot be smaller than 1 but got %d." % depth) if self.getNumPartitions() == 0: return zeroValue def aggregatePartition(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc partiallyAggregated = self.mapPartitions(aggregatePartition) numPartitions = partiallyAggregated.getNumPartitions() scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2) while numPartitions > scale + numPartitions / scale: numPartitions /= scale curNumPartitions = int(numPartitions) def mapPartition(i, iterator): for obj in iterator: yield (i % curNumPartitions, obj) partiallyAggregated = partiallyAggregated \ .mapPartitionsWithIndex(mapPartition) \ .reduceByKey(combOp, curNumPartitions) \ .values() return partiallyAggregated.reduce(combOp)
Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5
8,901
def spkopa(filename): filename = stypes.stringToCharP(filename) handle = ctypes.c_int() libspice.spkopa_c(filename, ctypes.byref(handle)) return handle.value
Open an existing SPK file for subsequent write. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkopa_c.html :param filename: The name of an existing SPK file. :type filename: str :return: A handle attached to the SPK file opened to append. :rtype: int
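A minimal usage sketch, assuming the spiceypy bindings and a hypothetical file path; segments would be appended with the spkw* writer routines before closing:

import spiceypy

# Open an existing SPK file ('ephemeris.bsp' is a hypothetical path) for appending.
handle = spiceypy.spkopa('ephemeris.bsp')
# ... append segments here with the spkw* routines ...
spiceypy.spkcls(handle)  # close the file to commit the appended data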
8,902
def __execute_scale(self, surface, size_to_scale_from): x = size_to_scale_from[0] * self.__scale[0] y = size_to_scale_from[1] * self.__scale[1] scaled_value = (int(x), int(y)) self.image = pygame.transform.scale(self.image, scaled_value) self.__resize_surface_extents()
Execute the scaling operation
8,903
def run_tasks(cls): now = timezone.now() tasks = cls.objects.filter(enabled=True) for task in tasks: if task.next_run == HAS_NOT_RUN: task.calc_next_run() if task.next_run < now: if (task.start_running < now): if (task.end_running > now): task.run_asap() else: task.enabled = False task.save() Channel(KILL_TASK_CHANNEL).send({: task.pk})
Internal task-runner class method, called by :py:func:`sisy.consumers.run_heartbeat`
8,904
def transform_streams_for_comparison(outputs): new_outputs = [] for output in outputs: if (output.output_type == 'stream'): new_outputs.append({ 'output_type': 'stream', output.name: output.text, }) else: new_outputs.append(output) return new_outputs
Makes failure output for streams better by having key be the stream name
8,905
def return_secondary_learner(self): estimator = self.base_learner_origin.return_estimator() estimator = estimator.set_params(**self.secondary_learner_hyperparameters) return estimator
Returns secondary learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object
8,906
def ltrimboth (l,proportiontocut): lowercut = int(proportiontocut*len(l)) uppercut = len(l) - lowercut return l[lowercut:uppercut]
Slices off the passed proportion of items from BOTH ends of the passed list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost' 10% of scores). Assumes list is sorted by magnitude. Slices off LESS if proportion results in a non-integer slice index (i.e., conservatively slices off proportiontocut). Usage: ltrimboth(l, proportiontocut) Returns: trimmed version of list l
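A short worked example of the conservative trimming: the slice index is truncated with int(), so proportiontocut=0.1 on a 10-item list removes exactly one item from each end:

scores = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # already sorted
trimmed = ltrimboth(scores, 0.1)
print(trimmed)  # [2, 3, 4, 5, 6, 7, 8, 9]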
8,907
def get_host_cache(service_instance=None): ret_dict = {} host_ref = _get_proxy_target(service_instance) hostname = __proxy__['esxi.get_details']()['esxi_host'] hci = salt.utils.vmware.get_host_cache(host_ref) if not hci: log.debug('Host cache not configured on host \'%s\'', hostname) ret_dict['enabled'] = False return ret_dict return {'enabled': True, 'datastore': {'name': hci.key.name}, 'swap_size': '{}MiB'.format(hci.swapSize)}
Returns the host cache configuration on the proxy host. service_instance Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Default is None. .. code-block:: bash salt '*' vsphere.get_host_cache
8,908
def get_html_text_editor( name, id=None, content=, textual_content=None, width=, height=, enabled=True, file_upload_url=None, toolbar_set="Basic", custom_configurations_path=, ln=None): ln = default_ln(ln) if textual_content is None: textual_content = content editor = if enabled and ckeditor_available: file_upload_script = if file_upload_url is not None: file_upload_script = %(file_upload_url)s%(file_upload_url)s?type=Image%(file_upload_url)s?type=Flash % {: file_upload_url} editor += <script type="text/javascript" src="%(CFG_SITE_URL)s/vendors/ckeditor/ckeditor.js"><\/script>/ckeditor/%(name)s%(custom_configurations_path)s%(toolbar)s%(width)s%(height)s%(ln)sinstanceReadyhtmlvalueeditortypeckeditorcontentDom % \ {: cgi.escape(textual_content), : content, : width, : height, : name, : id or name, : custom_configurations_path, : toolbar_set, : file_upload_script, : cfg[], : ln} else: textarea = \ % {: cgi.escape(textual_content), : width, : height, : name, : id and ( % id) or } editor += textarea editor += return editor
Returns a wysiwyg editor (CKEditor) to embed in html pages. Fall back to a simple textarea when the library is not installed, or when the user's browser is not compatible with the editor, or when 'enable' is False, or when javascript is not enabled. NOTE that the output also contains a hidden field named 'editor_type' that contains the kind of editor used, 'textarea' or 'ckeditor'. Based on 'editor_type' you might want to take different actions, like replace CRLF with <br/> when editor_type equals to 'textarea', but not when editor_type equals to 'ckeditor'. @param name: *str* the name attribute of the returned editor @param id: *str* the id attribute of the returned editor (when applicable) @param content: *str* the default content of the editor. @param textual_content: *str* a content formatted for the case where the wysiwyg editor is not available for the user. When not specified, use value of 'content' @param width: *str* width of the editor in an html compatible unit: Eg: '400px', '50%'. @param height: *str* height of the editor in an html compatible unit: Eg: '400px', '50%'. @param enabled: *bool* if the wysiwyg editor is returned (True) or if a simple textarea is returned (False) @param file_upload_url: *str* the URL used to upload new files via the editor upload panel. You have to implement the handler for your own use. The URL handler will get form variables 'File' as POST for the uploaded file, and 'Type' as GET for the type of file ('file', 'image', 'flash', 'media') When value is not given, the file upload is disabled. @param toolbar_set: *str* the name of the toolbar layout to use. CKeditor comes by default with 'Basic' and 'Default'. To define other sets, customize the config file in /opt/cds-invenio/var/www/ckeditor/invenio-ckconfig.js @param custom_configurations_path: *str* value for the CKeditor config variable 'CustomConfigurationsPath', which allows to specify the path of a file that contains a custom configuration for the editor. The path is relative to /opt/invenio/var/www/ @return: the HTML markup of the editor
8,909
def switch_toggle(context, ain): context.obj.login() actor = context.obj.get_actor_by_ain(ain) if actor: if actor.get_state(): actor.switch_off() click.echo("State for {} is now OFF".format(ain)) else: actor.switch_on() click.echo("State for {} is now ON".format(ain)) else: click.echo("Actor not found: {}".format(ain))
Toggle an actor's power state
8,910
def _remove_event_source(awsclient, evt_source, lambda_arn): event_source_obj = _get_event_source_obj(awsclient, evt_source) if event_source_obj.exists(lambda_arn): event_source_obj.remove(lambda_arn)
Given an event_source dictionary, create the object and remove the event source.
8,911
def systemd( state, host, name, running=True, restarted=False, reloaded=False, command=None, enabled=None, daemon_reload=False, ): if daemon_reload: yield 'systemctl daemon-reload' yield _handle_service_control( name, host.fact.systemd_status, 'systemctl {1} {0}.service', running, restarted, reloaded, command, ) if isinstance(enabled, bool): is_enabled = host.fact.systemd_enabled.get(name, False) if not is_enabled and enabled is True: yield 'systemctl enable {0}.service'.format(name) elif is_enabled and enabled is False: yield 'systemctl disable {0}.service'.format(name)
Manage the state of systemd managed services. + name: name of the service to manage + running: whether the service should be running + restarted: whether the service should be restarted + reloaded: whether the service should be reloaded + command: custom command to pass like: ``/etc/rc.d/<name> <command>`` + enabled: whether this service should be enabled/disabled on boot + daemon_reload: reload the systemd daemon to read updated unit files
8,912
def validateDocumentFinal(self, ctxt): if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o) return ret
Does the final step for the document validation once all the incremental validation steps have been completed. Basically it does the following checks described by the XML Rec: check all the IDREF/IDREFS attribute definitions for validity.
8,913
def get_serializer(self, instance=None, data=None, many=False, partial=False): serializers = { : NodeRequestListSerializer, : VoteRequestListSerializer, : CommentRequestListSerializer, : RatingRequestListSerializer, } context = self.get_serializer_context() service_code = context[].query_params.get(, ) if service_code not in serializers.keys(): serializer_class = self.get_serializer_class() else: serializer_class = serializers[service_code] return serializer_class(instance, many=many, partial=partial, context=context)
Return the serializer instance that should be used for validating and deserializing input, and for serializing output.
8,914
def raise_(type_, value=None, traceback=None): if type_.__traceback__ is not traceback: raise type_.with_traceback(traceback) raise type_
Does the same as ordinary ``raise`` with arguments does in Python 2. But works in Python 3 (>= 3.3) also! Please check out the README on https://github.com/9seconds/pep3134 to get an idea about possible pitfalls. The short story is: please be pretty careful with tracebacks. If it is possible, use sys.exc_info instead. But in most cases it will work as you expect.
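A usage sketch following the docstring's advice to lean on sys.exc_info: re-raise a caught exception with its original traceback attached:

import sys

try:
    {}['missing']
except KeyError:
    _, exc_value, exc_tb = sys.exc_info()
    # The caught exception instance already carries exc_tb, so this
    # re-raises it unchanged, Python-2 style but on Python 3.
    raise_(exc_value, traceback=exc_tb)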
8,915
def FDMT(data, f_min, f_max, maxDT, dataType): nint, nbl, nchan, npol = data.shape niters = int(np.log2(nchan)) assert nchan in 2**np.arange(30) and nint in 2**np.arange(30), "Input dimensions must be a power of 2" logger.info('Input data shape: {0}'.format(data.shape)) data = FDMT_initialization(data, f_min, f_max, maxDT, dataType) logger.info('Iterating {0} times to reach maxDT of {1}'.format(niters, maxDT)) for i_t in range(1, niters+1): data = FDMT_iteration(data, maxDT, nchan, f_min, f_max, i_t, dataType) [nint, dT, nbl, nchan, npol] = data.shape assert nchan == 1, 'Channel axis should be fully collapsed after the final iteration' return np.rollaxis(data[:,:,:,0,:], 1)
This function implements the FDMT algorithm. Input: Input visibility array (nints, nbl, nchan, npol) f_min,f_max are the base-band begin and end frequencies. The frequencies should be entered in MHz maxDT - the maximal delay (in time bins) of the maximal dispersion. Appears in the paper as N_{\Delta}. A typical input is maxDT = N_f dataType - a valid numpy dtype. recommended: either int32, or int64. Output: The dispersion measure transform of the Input matrix. The output dimensions are [Input.shape[1],maxDT] For details, see algorithm 1 in Zackay & Ofek (2014)
8,916
def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]: "Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest." split_params = [] for l in layer_groups: l1,l2 = [],[] for c in l.children(): if isinstance(c, no_wd_types): l2 += list(trainable_params(c)) elif isinstance(c, bias_types): bias = c.bias if hasattr(c, 'bias') else None l1 += [p for p in trainable_params(c) if not (p is bias)] if bias is not None: l2.append(bias) else: l1 += list(trainable_params(c)) l1,l2 = uniqueify(l1),uniqueify(l2) split_params += [l1, l2] return split_params
Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest.
8,917
async def ensure_process(self): with (await self.state['proc_lock']): if 'proc' not in self.state: cmd = self.get_cmd() server_env = os.environ.copy() server_env.update(self.get_env()) timeout = self.get_timeout() proc = SupervisedProcess(self.name, *cmd, env=server_env, ready_func=self._http_ready_func, ready_timeout=timeout, log=self.log) self.state['proc'] = proc try: await proc.start() is_ready = await proc.ready() if not is_ready: await proc.kill() raise web.HTTPError(500, 'could not start {} in time'.format(self.name)) except: del self.state['proc'] raise
Start the process
8,918
def load_auth_from_file(filename): with open(filename) as auth_file: lines = auth_file.read().splitlines() lines = [line.strip() for line in lines if len(line) != 0] if len(lines) == 2: credentials = (lines[0], lines[1]) elif len(lines) == 1: user_pass = lines[0].split() credentials = (user_pass[0], user_pass[1]) elif len(lines) == 0 or len(lines) > 2: raise ValueError(constants.INVALID_AUTH_FILE) if helpers.verif_auth(credentials, header): return credentials else: raise ValueError(constants.INVALID_CREDENTIALS)
Initializes the auth settings for accessing MyAnimeList through its official API from a given filename. :param filename The name of the file containing your MyAnimeList credentials REQUIREMENTS: The file must contain the username and password for your MAL account, separated by newline(s) or space(s). :return A tuple containing your credentials.
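A sketch of the two accepted file layouts and the call (file name and credentials are hypothetical):

# mal_auth.txt, two-line form:      # or single-line form:
#   my_username                     #   my_username my_password
#   my_password
username, password = load_auth_from_file('mal_auth.txt')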
8,919
def setFixedHeight(self, height): super(XViewPanelItem, self).setFixedHeight(height) self._dragLabel.setFixedHeight(height) self._titleLabel.setFixedHeight(height) self._searchButton.setFixedHeight(height) self._closeButton.setFixedHeight(height)
Sets the fixed height for this item to the input height amount. :param height | <int>
8,920
def rpc_call(self, request, method=None, params=None, **kwargs): args = [] kwargs = dict() if isinstance(params, dict): kwargs.update(params) else: args = list(as_tuple(params)) method_key = "{0}.{1}".format(self.scheme_name, method) if method_key not in self.methods: raise AssertionError("Unknown method: {0}".format(method)) method = self.methods[method_key] if hasattr(method, ): args.insert(0, request) return method(*args, **kwargs)
Call an RPC method. :return object: a result
8,921
def play(self, call_params): path = '/' + self.api_version + '/Play/' method = 'POST' return self.request(path, method, call_params)
REST Play something on a Call Helper
8,922
def sub_article_folders(self): l = list() for p in Path.sort_by_fname( Path(self.dir_path).select_dir(recursive=False) ): af = ArticleFolder(dir_path=p.abspath) try: if af.title is not None: l.append(af) except: pass return l
Returns all valid ArticleFolder sitting inside of :attr:`ArticleFolder.dir_path`.
8,923
def importance(self, attribute, examples): gain_counter = OnlineInformationGain(attribute, self.target) for example in examples: gain_counter.add(example) return gain_counter.get_gain()
AIMA implies that importance should be information gain. Since AIMA only defines it for binary features this implementation was based on the wikipedia article: http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
8,924
def DEFAULT_RENAMER(L, Names=None): if isinstance(L,dict): Names = L.keys() LL = L.values() else: if Names is None: Names = range(len(L)) else: assert len(Names) == len(L) LL = L commons = Commons([l.dtype.names for l in LL]) D = {} for (i,l) in zip(Names, LL): d = {} for c in commons: if c in l.dtype.names: d[c] = c + '_' + str(i) if d: D[i] = d return D
Renames overlapping column names of numpy ndarrays with structured dtypes Rename the columns by using a simple convention: * If `L` is a list, it will append the number in the list to the key associated with the array. * If `L` is a dictionary, the algorithm will append the string representation of the key associated with an array to the overlapping columns from that array. Default renamer function used by :func:`tabular.spreadsheet.join` **Parameters** **L** : list or dictionary Numpy recarrays with columns to be renamed. **Returns** **D** : dictionary of dictionaries Dictionary mapping each input numpy recarray to a dictionary mapping each original column name to its new name following the convention above.
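A worked example with two structured arrays that share both column names, assuming Commons returns the overlapping names and the underscore separator restored above:

import numpy as np

a = np.array([(1, 2.0)], dtype=[('id', int), ('x', float)])
b = np.array([(3, 4.0)], dtype=[('id', int), ('x', float)])
# Both arrays carry 'id' and 'x', so each gets a suffixed rename map.
print(DEFAULT_RENAMER([a, b]))
# {0: {'id': 'id_0', 'x': 'x_0'}, 1: {'id': 'id_1', 'x': 'x_1'}}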
8,925
def request_issuance(self, csr): action = LOG_ACME_REQUEST_CERTIFICATE() with action.context(): return ( DeferredContext( self._client.post( self.directory[csr], csr, content_type=DER_CONTENT_TYPE, headers=Headers({b'Accept': [DER_CONTENT_TYPE]}))) .addCallback(self._expect_response, http.CREATED) .addCallback(self._parse_certificate) .addActionFinish())
Request a certificate. Authorizations should have already been completed for all of the names requested in the CSR. Note that unlike `acme.client.Client.request_issuance`, the certificate resource will have the body data as raw bytes. .. seealso:: `txacme.util.csr_for_names` .. todo:: Delayed issuance is not currently supported, the server must issue the requested certificate immediately. :param csr: A certificate request message: normally `txacme.messages.CertificateRequest` or `acme.messages.CertificateRequest`. :rtype: Deferred[`acme.messages.CertificateResource`] :return: The issued certificate.
8,926
def build(self, tag, **kwargs): self.push_log("Building image '{0}'.".format(tag)) set_raise_on_error(kwargs) try: return super(DockerFabricClient, self).build(tag, **kwargs) except DockerStatusError as e: error(e.message)
Identical to :meth:`dockermap.client.base.DockerClientWrapper.build` with additional logging.
8,927
def print_math(math_expression_lst, name = "math.html", out=, formatter = lambda x: x): try: shutil.rmtree() except: pass pth = get_cur_path()+print_math_template_path shutil.copytree(pth, ) html_loc = None if out == "html": html_loc = pth+"standalone_index.html" if out == "notebook": from IPython.display import display, HTML html_loc = pth+"notebook_index.html" html = open(html_loc).read() html = html.replace("__MATH_LIST__", json.dumps(math_expression_lst)) if out == "notebook": display(HTML(html)) elif out == "html": with open(name, "w+") as out_f: out_f.write(html)
Converts LaTeX math expressions into an html layout. Creates a html file in the directory where print_math is called by default. Displays math to jupyter notebook if "notebook" argument is specified. Args: math_expression_lst (list): A list of LaTeX math (string) to be rendered by KaTeX out (string): {"html"|"notebook"}: HTML by default. Specifies output medium. formatter (function): function that cleans up the string for KaTeX. Returns: A HTML file in the directory where this function is called, or displays HTML output in a notebook.
8,928
def generateXY(self, **kwargs): print(" .format(self.fnamenoext, self.wcs.extname, util._ptime()[0])) if self.pars[]: sigma = self._compute_sigma() else: sigma = self.pars[] skymode = sigma**2 log.info(%sigma) if self.pars[] in [None,"INDEF",""," "]: hmin = skymode else: hmin = sigma*self.pars[] if in kwargs and kwargs[] is not None: dqmask = np.asarray(kwargs[], dtype=bool) else: dqmask = None mask = self._combine_exclude_mask(dqmask) x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind( self.source, hmin, self.pars[], skymode, sharplim=[self.pars[],self.pars[]], roundlim=[self.pars[],self.pars[]], peakmin=self.pars[], peakmax=self.pars[], fluxmin=self.pars[], fluxmax=self.pars[], nsigma=self.pars[], ratio=self.pars[], theta=self.pars[], mask=mask, use_sharp_round=self.use_sharp_round, nbright=self.nbright ) if len(x) == 0: if not self.pars[]: sigma = self._compute_sigma() hmin = sigma * self.pars[] log.info() x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind( self.source, hmin, self.pars[], skymode, sharplim=[self.pars[],self.pars[]], roundlim=[self.pars[],self.pars[]], peakmin=self.pars[], peakmax=self.pars[], fluxmin=self.pars[], fluxmax=self.pars[], nsigma=self.pars[], ratio=self.pars[], theta=self.pars[], mask = mask, use_sharp_round = self.use_sharp_round, nbright=self.nbright ) if len(x) == 0: xypostypes = 3*[float]+[int]+(3 if self.use_sharp_round else 0)*[float] self.xypos = [np.empty(0, dtype=i) for i in xypostypes] warnstr = textutil.textbox(+ ) for line in warnstr.split(): log.warning(line) print(warnstr) else: if self.use_sharp_round: self.xypos = [x+1, y+1, flux, src_id+self.start_id, sharp, round1, round2] else: self.xypos = [x+1, y+1, flux, src_id+self.start_id] log.info(%(util._ptime()[0])) self.in_units = self.sharp = sharp self.round1 = round1 self.round2 = round2 self.numcols = 7 if self.use_sharp_round else 4 self.num_objects = len(x) self._apply_flux_limits = False
Generate source catalog from input image using DAOFIND-style algorithm
8,929
def to_line_string(self, closed=True): from imgaug.augmentables.lines import LineString if not closed or len(self.exterior) <= 1: return LineString(self.exterior, label=self.label) return LineString( np.concatenate([self.exterior, self.exterior[0:1, :]], axis=0), label=self.label)
Convert this polygon's `exterior` to a ``LineString`` instance. Parameters ---------- closed : bool, optional Whether to close the line string, i.e. to add the first point of the `exterior` also as the last point at the end of the line string. This has no effect if the polygon has a single point or zero points. Returns ------- imgaug.augmentables.lines.LineString Exterior of the polygon as a line string.
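A usage sketch with imgaug's Polygon type, contrasting the closed and open forms:

from imgaug.augmentables.polys import Polygon

poly = Polygon([(0, 0), (10, 0), (10, 10)])
closed = poly.to_line_string()              # 4 coords: first point repeated at the end
open_ = poly.to_line_string(closed=False)   # 3 coords: the exterior as-is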
8,930
def open(safe_file): if os.path.isdir(safe_file) or os.path.isfile(safe_file): return SentinelDataSet(safe_file) else: raise IOError("file not found: %s" % safe_file)
Return a SentinelDataSet object.
8,931
def quaternion_from_euler(angles, order='yzy'): angles = np.asarray(angles, dtype=float) quat = quaternion_from_axis_rotation(angles[0], order[0]) * (quaternion_from_axis_rotation(angles[1], order[1]) * quaternion_from_axis_rotation(angles[2], order[2])) quat.normalize(inplace=True) return quat
Generate a quaternion from a set of Euler angles. Args: angles (array_like): Array of Euler angles. order (str): Order of Euler rotations. 'yzy' is default. Returns: Quaternion: Quaternion representation of Euler rotation.
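A minimal call sketch, assuming the angles are given in radians and the module's quaternion helpers are importable:

import numpy as np

# 90-degree rotation about the first axis of the default 'yzy' sequence.
quat = quaternion_from_euler([np.pi / 2, 0.0, 0.0])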
8,932
def _extract_from_url(self, url): m = re.search(re_pub_date, url) if m: return self.parse_date_str(m.group(0)) return None
Try to extract from the article URL - simple but might work as a fallback
8,933
def checkout(self, ref, cb=None): if self.is_api: return self._checkout_api(ref, cb=cb) else: return self._checkout_fs(ref, cb=cb)
Checkout a bundle from the remote. Returns a file-like object
8,934
def fromrandom(shape=(10, 50, 50), npartitions=1, seed=42, engine=None): seed = hash(seed) def generate(v): random.seed(seed + v) return random.randn(*shape[1:]) return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine)
Generate random image data. Parameters ---------- shape : tuple, optional, default=(10, 50, 50) Dimensions of images. npartitions : int, optional, default=1 Number of partitions. seed : int, optional, default=42 Random seed.
8,935
def draw(self, scr): numHeaderRows = 1 scr.erase() vd().refresh() if not self.columns: return color_current_row = CursesAttr(colors.color_current_row, 5) disp_column_sep = options.disp_column_sep rowattrs = {} colattrs = {} isNull = isNullFunc() self.rowLayout = {} self.calcColLayout() vcolidx = 0 rows = list(self.rows[self.topRowIndex:self.topRowIndex+self.nVisibleRows]) for vcolidx, colinfo in sorted(self.visibleColLayout.items()): x, colwidth = colinfo col = self.visibleCols[vcolidx] if x < self.vd.windowWidth: headerRow = 0 self.drawColHeader(scr, headerRow, vcolidx) y = headerRow + numHeaderRows for rowidx in range(0, min(len(rows), self.nVisibleRows)): dispRowIdx = self.topRowIndex + rowidx if dispRowIdx >= self.nRows: break self.rowLayout[dispRowIdx] = y row = rows[rowidx] cellval = col.getCell(row, colwidth-1) try: if isNull(cellval.value): cellval.note = options.disp_note_none cellval.notecolor = except TypeError: pass attr = self.colorize(col, row, cellval) rowattr = rowattrs.get(rowidx) if rowattr is None: rowattr = rowattrs[rowidx] = self.colorize(None, row) sepattr = rowattr if dispRowIdx == self.cursorRowIndex: attr = attr.update_attr(color_current_row) sepattr = sepattr.update_attr(color_current_row) note = getattr(cellval, , None) if note: noteattr = attr.update_attr(colors.get_color(cellval.notecolor), 10) clipdraw(scr, y, x+colwidth-len(note), note, noteattr.attr, len(note)) clipdraw(scr, y, x, disp_column_fill+cellval.display, attr.attr, colwidth-(1 if note else 0)) vd.onMouse(scr, y, x, 1, colwidth, BUTTON3_RELEASED=) sepchars = disp_column_sep if (self.keyCols and col is self.keyCols[-1]) or vcolidx == self.rightVisibleColIndex: sepchars = options.disp_keycol_sep if x+colwidth+len(sepchars) <= self.vd.windowWidth: scr.addstr(y, x+colwidth, sepchars, sepattr.attr) y += 1 if vcolidx+1 < self.nVisibleCols: scr.addstr(headerRow, self.vd.windowWidth-2, options.disp_more_right, colors.color_column_sep) catchapply(self.checkCursor)
Draw entire screen onto the `scr` curses object.
8,936
def hashify_files(files: list) -> dict: return {filepath.replace(, ): hash_tree(filepath) for filepath in listify(files)}
Return mapping from file path to file hash.
8,937
def all_referenced_targets(self, result): if __debug__: from .property import Property assert is_iterable_typed(result, (VirtualTarget, Property)) deps = self.build_properties().dependency() all_targets = self.sources_ + deps r = [] for e in all_targets: if e not in result: result.add(e) if isinstance(e, property.Property): t = e.value else: t = e cs = t.creating_subvariant() if cs: r.append(cs) r = unique(r) for s in r: if s != self: s.all_referenced_targets(result)
Returns all targets referenced by this subvariant, either directly or indirectly, and either as sources, or as dependency properties. Targets referred to with a dependency property are returned as properties, not targets.
8,938
def makevAndvPfuncs(self,policyFunc): mCount = self.aXtraGrid.size pCount = self.pLvlGrid.size MedCount = self.MedShkVals.size temp_grid = np.tile(np.reshape(self.aXtraGrid,(mCount,1,1)),(1,pCount,MedCount)) aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount,1)), (mCount,1,MedCount)) pGrid = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(mCount,1,MedCount)) mGrid = temp_grid*pGrid + aMinGrid if self.pLvlGrid[0] == 0: mGrid[:,0,:] = np.tile(np.reshape(self.aXtraGrid,(mCount,1)),(1,MedCount)) MedShkGrid = np.tile(np.reshape(self.MedShkVals,(1,1,MedCount)),(mCount,pCount,1)) probsGrid = np.tile(np.reshape(self.MedShkPrbs,(1,1,MedCount)),(mCount,pCount,1)) cGrid,MedGrid = policyFunc(mGrid,pGrid,MedShkGrid) if self.vFuncBool: MedGrid = np.maximum(MedGrid,1e-100) aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid) vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid,pGrid) vNow = np.sum(vGrid*probsGrid,axis=2) vPgrid = self.uP(cGrid) vPnow = np.sum(vPgrid*probsGrid,axis=2) mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount)),mGrid[:,:,0])) vPnvrsNow = np.concatenate((np.zeros((1,pCount)),self.uPinv(vPnow))) if self.vFuncBool: vNvrsNow = np.concatenate((np.zeros((1,pCount)),self.uinv(vNow)),axis=0) vNvrsPnow = vPnow*self.uinvP(vNow) vNvrsPnow = np.concatenate((np.zeros((1,pCount)),vNvrsPnow),axis=0) vPnvrsFunc_by_pLvl = [] vNvrsFunc_by_pLvl = [] for j in range(pCount): pLvl = self.pLvlGrid[j] m_temp = mGrid_small[:,j] - self.mLvlMinNow(pLvl) vPnvrs_temp = vPnvrsNow[:,j] vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp,vPnvrs_temp)) if self.vFuncBool: vNvrs_temp = vNvrsNow[:,j] vNvrsP_temp = vNvrsPnow[:,j] vNvrsFunc_by_pLvl.append(CubicInterp(m_temp,vNvrs_temp,vNvrsP_temp)) vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl,self.pLvlGrid) vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase,self.mLvlMinNow) if self.vFuncBool: vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl,self.pLvlGrid) vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) vPfunc = MargValueFunc2D(vPnvrsFunc,self.CRRA) if self.vFuncBool: vFunc = ValueFunc2D(vNvrsFunc,self.CRRA) else: vFunc = NullFunc() return vFunc, vPfunc
Constructs the marginal value function for this period. Parameters ---------- policyFunc : function Consumption and medical care function for this period, defined over market resources, permanent income level, and the medical need shock. Returns ------- vFunc : function Value function for this period, defined over market resources and permanent income. vPfunc : function Marginal value (of market resources) function for this period, defined over market resources and permanent income.
8,939
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, fft=False): A_n = np.array(A_n) if fft and B_n is None: return statisticalInefficiency_fft(A_n, mintime=mintime) if B_n is not None: B_n = np.array(B_n) else: B_n = np.array(A_n) N = A_n.size if(A_n.shape != B_n.shape): raise ParameterError('A_n and B_n must have same dimensions.') g = 1.0 mu_A = A_n.mean() mu_B = B_n.mean() dA_n = A_n.astype(np.float64) - mu_A dB_n = B_n.astype(np.float64) - mu_B sigma2_AB = (dA_n * dB_n).mean() if(sigma2_AB == 0): raise ParameterError('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency') t = 1 increment = 1 while (t < N - 1): C = np.sum(dA_n[0:(N - t)] * dB_n[t:N] + dB_n[0:(N - t)] * dA_n[t:N]) / (2.0 * float(N - t) * sigma2_AB) if (C <= 0.0) and (t > mintime): break g += 2.0 * C * (1.0 - float(t) / float(N)) * float(increment) t += increment if fast: increment += 1 if (g < 1.0): g = 1.0 return g
Compute the (cross) statistical inefficiency of (two) timeseries. Parameters ---------- A_n : np.ndarray, float A_n[n] is nth value of timeseries A. Length is deduced from vector. B_n : np.ndarray, float, optional, default=None B_n[n] is nth value of timeseries B. Length is deduced from vector. If supplied, the cross-correlation of timeseries A and B will be estimated instead of the autocorrelation of timeseries A. fast : bool, optional, default=False If True, will use faster (but less accurate) method to estimate correlation time, described in Ref. [1] (default: False). This is ignored when B_n=None and fft=True. mintime : int, optional, default=3 minimum amount of correlation function to compute (default: 3) The algorithm terminates after computing the correlation time out to mintime when the correlation function first goes negative. Note that this time may need to be increased if there is a strong initial negative peak in the correlation function. fft : bool, optional, default=False If fft=True and B_n=None, then use the fft based approach, as implemented in statisticalInefficiency_fft(). Returns ------- g : np.ndarray, g is the estimated statistical inefficiency (equal to 1 + 2 tau, where tau is the correlation time). We enforce g >= 1.0. Notes ----- The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency. The fast method described in Ref [1] is used to compute g. References ---------- [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted histogram analysis method for the analysis of simulated and parallel tempering simulations. JCTC 3(1):26-41, 2007. Examples -------- Compute statistical inefficiency of timeseries data with known correlation time. >>> from pymbar.testsystems import correlated_timeseries_example >>> A_n = correlated_timeseries_example(N=100000, tau=5.0) >>> g = statisticalInefficiency(A_n, fast=True)
8,940
def _get_ctypes(self): ctypes = [] for related_object in self.model._meta.get_all_related_objects(): model = getattr(related_object, 'related_model', related_object.model) ctypes.append(ContentType.objects.get_for_model(model).pk) if model.__subclasses__(): for child in model.__subclasses__(): ctypes.append(ContentType.objects.get_for_model(child).pk) return ctypes
Returns the content type ids of all objects related to this model, including subclasses of the related models.
8,941
def VarintReader(buf, pos=0): result = 0 shift = 0 while 1: b = buf[pos] result |= (ORD_MAP_AND_0X7F[b] << shift) pos += 1 if not ORD_MAP_AND_0X80[b]: return (result, pos) shift += 7 if shift >= 64: raise rdfvalue.DecodeError("Too many bytes when decoding varint.")
A 64 bit decoder from google.protobuf.internal.decoder.
8,942
def read(self, fp): "Reads a dictionary from an input stream." base_size = struct.unpack(str("=I"), fp.read(4))[0] self._units.fromfile(fp, base_size)
Reads a dictionary from an input stream.
8,943
def pancake_sort(arr): len_arr = len(arr) if len_arr <= 1: return arr for cur in range(len(arr), 1, -1): index_max = arr.index(max(arr[0:cur])) if index_max+1 != cur: if index_max != 0: arr[:index_max+1] = reversed(arr[:index_max+1]) arr[:cur] = reversed(arr[:cur]) return arr
Pancake sort: sorts a given array in place; a mutation of selection sort. Reference: https://www.geeksforgeeks.org/pancake-sorting/ Overall time complexity: O(N^2)
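A quick check of the in-place behavior (the list is mutated and also returned):

arr = [3, 6, 1, 10, 2]
print(pancake_sort(arr))  # [1, 2, 3, 6, 10]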
8,944
def iterate_pubmed_identifiers(graph) -> Iterable[str]: return ( data[CITATION][CITATION_REFERENCE].strip() for _, _, data in graph.edges(data=True) if has_pubmed(data) )
Iterate over all PubMed identifiers in a graph. :param pybel.BELGraph graph: A BEL graph :return: An iterator over the PubMed identifiers in the graph
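A usage sketch, assuming `graph` is a populated pybel.BELGraph; wrapping the iterator in set() deduplicates identifiers cited by multiple edges:

pmids = set(iterate_pubmed_identifiers(graph))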
8,945
def runcode(code): for line in code: print('>>> ' + line) exec(line, globals()) print() return ans
Run the given code (a list of lines) line by line, printing each line, and return the variable 'ans'.
8,946
def generic_pst(par_names=["par1"],obs_names=["obs1"],addreg=False): if not isinstance(par_names,list): par_names = list(par_names) if not isinstance(obs_names,list): obs_names = list(obs_names) new_pst = pyemu.Pst("pest.pst",load=False) pargp_data = populate_dataframe(["pargp"], new_pst.pargp_fieldnames, new_pst.pargp_defaults, new_pst.pargp_dtype) new_pst.parameter_groups = pargp_data par_data = populate_dataframe(par_names,new_pst.par_fieldnames, new_pst.par_defaults,new_pst.par_dtype) par_data.loc[:,"parnme"] = par_names par_data.index = par_names par_data.sort_index(inplace=True) new_pst.parameter_data = par_data obs_data = populate_dataframe(obs_names,new_pst.obs_fieldnames, new_pst.obs_defaults,new_pst.obs_dtype) obs_data.loc[:,"obsnme"] = obs_names obs_data.index = obs_names obs_data.sort_index(inplace=True) new_pst.observation_data = obs_data new_pst.template_files = ["file.tpl"] new_pst.input_files = ["file.in"] new_pst.instruction_files = ["file.ins"] new_pst.output_files = ["file.out"] new_pst.model_command = ["model.bat"] new_pst.prior_information = new_pst.null_prior if addreg: new_pst.zero_order_tikhonov() return new_pst
Generate a generic pst instance. This can be used to later fill in the Pst parts programmatically. Parameters ---------- par_names : (list) parameter names to setup obs_names : (list) observation names to setup addreg : (bool) flag to add zero-order Tikhonov regularization Returns ------- new_pst : pyemu.Pst
8,947
def main(argv=None): if argv is None: argv = sys.argv[1:] cli = CommandLineTool() return cli.run(argv)
Main command line interface.
8,948
def get_record(self, path=None, no_pdf=False, test=False, refextract_callback=None): xml_doc = self.get_article(path) rec = create_record() title = self.get_title(xml_doc) if title: record_add_field(rec, , subfields=[(, title)]) (journal, dummy, volume, issue, first_page, last_page, year, start_date, doi) = self.get_publication_information(xml_doc, path) if not journal: journal = self.get_article_journal(xml_doc) if start_date: record_add_field(rec, , subfields=[(, start_date), (, )]) else: record_add_field( rec, , subfields=[(, time.strftime())]) if doi: record_add_field(rec, , ind1=, subfields=[(, doi), (, )]) license, license_url = self.get_license(xml_doc) if license and license_url: record_add_field(rec, , subfields=[(, license), (, license_url)]) elif license_url: record_add_field(rec, , subfields=[(, license_url)]) self.logger.info("Creating record: %s %s" % (path, doi)) authors = self.get_authors(xml_doc) first_author = True for author in authors: author_name = (author[], author.get( ) or author.get()) subfields = [(, % author_name)] if in author: subfields.append((, author[])) if in author: for aff in author["affiliation"]: subfields.append((, aff)) if self.extract_nations: add_nations_field(subfields) if author.get(): subfields.append((, author[])) if first_author: record_add_field(rec, , subfields=subfields) first_author = False else: record_add_field(rec, , subfields=subfields) abstract = self.get_abstract(xml_doc) if abstract: record_add_field(rec, , subfields=[(, abstract), (, )]) record_copyright = self.get_copyright(xml_doc) if record_copyright: record_add_field(rec, , subfields=[(, record_copyright)]) keywords = self.get_keywords(xml_doc) if self.CONSYN: for tag in xml_doc.getElementsByTagName(): collaboration = get_value_in_tag(tag, ) if collaboration: record_add_field(rec, , subfields=[(, collaboration)]) subjects = xml_doc.getElementsByTagName() for subject in subjects: for listitem in subject.getElementsByTagName(): keyword = xml_to_text(listitem) if keyword not in keywords: keywords.append(keyword) for keyword in keywords: record_add_field(rec, , ind1=, subfields=[(, keyword), (, )]) journal, dummy = fix_journal_name(journal.strip(), self.journal_mappings) subfields = [] doctype = self.get_doctype(xml_doc) try: page_count = int(last_page) - int(first_page) + 1 record_add_field(rec, , subfields=[(, str(page_count))]) except ValueError: pass if doctype == : subfields.append((, )) elif doctype == : subfields.append((, )) elif doctype == : subfields.append((, )) elif doctype == : record_add_field(rec, , subfields=[(, )]) if journal: subfields.append((, journal)) if first_page and last_page: subfields.append((, % (first_page, last_page))) elif first_page: subfields.append((, first_page)) if volume: subfields.append((, volume)) if year: subfields.append((, year)) record_add_field(rec, , subfields=subfields) if not test: if license: url = \ + path.split()[-1][:-4] record_add_field(rec, , ind1=, subfields=[(, url), (, )]) record_add_field(rec, , subfields=[(, path), (, ), (, )]) else: record_add_field(rec, , subfields=[(, path), (, ), (, )]) record_add_field(rec, , subfields=[(, )]) record_add_field(rec, , subfields=[(, )]) record_add_field(rec, , subfields=[(, )]) self._add_references(xml_doc, rec, refextract_callback) else: licence = record_add_field(rec, , subfields=[(, ), (, licence)]) if keywords: for keyword in keywords: record_add_field( rec, , ind1=, subfields=[(, keyword), (, )]) pages = if first_page and last_page: pages = .format(first_page, last_page) elif 
first_page: pages = first_page subfields = filter(lambda x: x[1] and x[1] != , [(, journal), (, volume), (, issue), (, pages), (, year)]) record_add_field(rec, , subfields=subfields) if not no_pdf: from invenio.search_engine import perform_request_search query = % (doi,) prev_version = perform_request_search(p=query) old_pdf = False if prev_version: from invenio.bibdocfile import BibRecDocs prev_rec = BibRecDocs(prev_version[0]) try: pdf_path = prev_rec.get_bibdoc() pdf_path = pdf_path.get_file( ".pdf;pdfa", exact_docformat=True) pdf_path = pdf_path.fullpath old_pdf = True record_add_field(rec, , subfields=[(, pdf_path), (, ), (, )]) message = ( + doi) self.logger.info(message) except: pass try: if exists(join(path, )): pdf_path = join(path, ) record_add_field(rec, , subfields=[(, pdf_path), (, ), (, )]) self.logger.debug( % (doi,)) elif exists(join(path, )): pdf_path = join(path, ) record_add_field(rec, , subfields=[(, pdf_path)]) else: if not old_pdf: message = "Record " + doi message += " doesn583lmain.xmlFFTa980aSCOAP3bElsevier')]) try: return record_xml_output(rec) except UnicodeDecodeError: message = "Found a bad char in the file for the article " + doi sys.stderr.write(message) return ""
Convert a record to MARCXML format. :param path: path to a record. :type path: string :param no_pdf: if True, do not attach the article PDF. :type no_pdf: bool :param test: flag to determine if it is a test call. :type test: bool :param refextract_callback: callback to be used to extract unstructured references. It should return a MARCXML-formatted string of the reference. :type refextract_callback: callable :returns: MARCXML-formatted string.
8,949
def separate(self): collections = [] start = None end = None for index in self.indexes: if start is None: start = index end = start continue if index != (end + 1): collections.append( Collection(self.head, self.tail, self.padding, indexes=set(range(start, end + 1))) ) start = index end = index if start is None: collections.append( Collection(self.head, self.tail, self.padding) ) else: collections.append( Collection(self.head, self.tail, self.padding, indexes=range(start, end + 1)) ) return collections
Return contiguous parts of collection as separate collections. Return as list of :py:class:`~clique.collection.Collection` instances.
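A sketch using clique's top-level assemble helper (file names are hypothetical); the gap between 002 and 005 splits the collection in two:

import clique

collections, _ = clique.assemble(['f.001.jpg', 'f.002.jpg', 'f.005.jpg'])
for collection in collections:
    # e.g. one collection covering 001-002 and one covering 005
    print(collection.separate())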
8,950
def cover(ctx, html=False): params = if html else with ctx.cd(ROOT): ctx.run(.format(params), pty=True)
Run the test suite with coverage
8,951
def get_fernet(): global _fernet log = LoggingMixin().log if _fernet: return _fernet try: from cryptography.fernet import Fernet, MultiFernet, InvalidToken global InvalidFernetToken InvalidFernetToken = InvalidToken except BuiltinImportError: log.warning( "cryptography not found - values will not be stored encrypted." ) _fernet = NullFernet() return _fernet try: fernet_key = configuration.conf.get('core', 'FERNET_KEY') if not fernet_key: log.warning( "empty cryptography key - values will not be stored encrypted." ) _fernet = NullFernet() else: _fernet = MultiFernet([ Fernet(fernet_part.encode('utf-8')) for fernet_part in fernet_key.split(',') ]) _fernet.is_encrypted = True except (ValueError, TypeError) as ve: raise AirflowException("Could not create Fernet object: {}".format(ve)) return _fernet
Deferred load of Fernet key. This function could fail either because Cryptography is not installed or because the Fernet key is invalid. :return: Fernet object :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
8,952
def draw_linecollection(data, obj): content = [] edgecolors = obj.get_edgecolors() linestyles = obj.get_linestyles() linewidths = obj.get_linewidths() paths = obj.get_paths() for i, path in enumerate(paths): color = edgecolors[i] if i < len(edgecolors) else edgecolors[0] style = linestyles[i] if i < len(linestyles) else linestyles[0] width = linewidths[i] if i < len(linewidths) else linewidths[0] data, options = mypath.get_draw_options(data, obj, color, None, style, width) data, cont, _, _ = mypath.draw_path( data, path, draw_options=options, simplify=False ) content.append(cont + "\n") return data, content
Returns Pgfplots code for a LineCollection object, emitting one path per line segment with its own color, style, and width.
8,953
async def start(self): self._command_task.start() try: await self._cleanup_old_connections() except Exception: await self.stop() raise iotile_id = next(iter(self.adapter.devices)) self.device = self.adapter.devices[iotile_id] self._logger.info("Serving device 0x%04X over BLED112", iotile_id) await self._update_advertisement() self.setup_client(self.CLIENT_ID, scan=False, broadcast=True)
Start serving access to devices over bluetooth.
8,954
def satisfies(self, other): return ( self.dependency.name == other.dependency.name and self.relation(other) == SetRelation.SUBSET )
Returns whether this term satisfies another.
8,955
def configure_mongodb(self): self._display_info("Trying default configuration") host = "localhost" database_name = "INGInious" should_ask = True if self.try_mongodb_opts(host, database_name): should_ask = self._ask_boolean( "Successfully connected to MongoDB. Do you want to edit the configuration anyway?", False) else: self._display_info("Cannot guess configuration for MongoDB.") while should_ask: self._display_question( "Please enter the MongoDB host. If you need to enter a password, here is the syntax:") self._display_question("mongodb://USERNAME:PASSWORD@HOST:PORT/AUTHENTIFICATION_DATABASE") host = self._ask_with_default("MongoDB host", host) database_name = self._ask_with_default("Database name", database_name) if not self.try_mongodb_opts(host, database_name): if self._ask_boolean("Cannot connect to MongoDB. Would you like to continue anyway?", False): break else: self._display_info("Successfully connected to MongoDB") break return {"mongo_opt": {"host": host, "database": database_name}}
Configure MongoDB
8,956
def dist(self): underlying_dist = self.underlying_dist if self._execution_strategy != NailgunTaskBase.HERMETIC: jdk_home_symlink = os.path.relpath( os.path.join(self._zinc_factory.get_options().pants_workdir, ), get_buildroot()) with self._lock: if not os.path.exists(jdk_home_symlink): os.symlink(underlying_dist.home, jdk_home_symlink) elif os.readlink(jdk_home_symlink) != underlying_dist.home: os.remove(jdk_home_symlink) os.symlink(underlying_dist.home, jdk_home_symlink) return Distribution(home_path=jdk_home_symlink) else: return underlying_dist
Return the `Distribution` selected for Zinc based on execution strategy. :rtype: pants.java.distribution.distribution.Distribution
8,957
def imgmin(self): if not hasattr(self, '_imgmin'): imgmin = _np.min(self.images[0]) for img in self.images: imin = _np.min(img) if imin > imgmin: imgmin = imin self._imgmin = imgmin return _np.min(self.image)
Lowest value of input image.
8,958
def get_path(self): path = deque() __, node = self.get_focus() while not node.is_root(): stats = node.get_value() path.appendleft(hash(stats)) node = node.get_parent() return path
Gets the path to the focused statistics. Each step is a hash of statistics object.
8,959
def SetupDisplayDevice(self, type, state, percentage, energy, energy_full, energy_rate, time_to_empty, time_to_full, is_present, icon_name, warning_level): if not self.api1: raise dbus.exceptions.DBusException( 'SetupDisplayDevice() can only be used with the 1.0 API', name=MOCK_IFACE + '.APIVersion') display_props = mockobject.objects[self.p_display_dev] display_props.Set(DEVICE_IFACE, 'Type', dbus.UInt32(type)) display_props.Set(DEVICE_IFACE, 'State', dbus.UInt32(state)) display_props.Set(DEVICE_IFACE, 'Percentage', percentage) display_props.Set(DEVICE_IFACE, 'Energy', energy) display_props.Set(DEVICE_IFACE, 'EnergyFull', energy_full) display_props.Set(DEVICE_IFACE, 'EnergyRate', energy_rate) display_props.Set(DEVICE_IFACE, 'TimeToEmpty', dbus.Int64(time_to_empty)) display_props.Set(DEVICE_IFACE, 'TimeToFull', dbus.Int64(time_to_full)) display_props.Set(DEVICE_IFACE, 'IsPresent', is_present) display_props.Set(DEVICE_IFACE, 'IconName', icon_name) display_props.Set(DEVICE_IFACE, 'WarningLevel', dbus.UInt32(warning_level))
Convenience method to configure DisplayDevice properties This calls Set() for all properties that the DisplayDevice is defined to have, and is shorter if you have to completely set it up instead of changing just one or two properties. This is only available when mocking the 1.0 API.
8,960
def save_user(self, idvalue, options=None): options = options or {} data = options.copy() data['id'] = idvalue return self.api_post('user', data)
save user by a given id http://getstarted.sailthru.com/api/user
8,961
def configure_threecolor_image(self): order = {'red': 0, 'green': 1, 'blue': 2} self.image = np.zeros((self.shape[0], self.shape[1], 3)) for color, var in self.multicolorvars.items(): channel = var.get() self.image[:, :, order[color]] = self.data[channel] self.image[:, :, order[color]] = np.power(self.image[:, :, order[color]], self.multicolorpower[color].get()) lower = np.nanpercentile(self.image[:, :, order[color]], self.multicolormin[color].get()) upper = np.nanpercentile(self.image[:, :, order[color]], self.multicolormax[color].get()) self.image[np.where(self.image[:, :, order[color]] < lower)] = lower self.image[np.where(self.image[:, :, order[color]] > upper)] = upper for color, index in order.items(): self.image[:, :, index] /= np.nanmax(self.image[:, :, index])
configures the three color image according to the requested parameters :return: nothing, just updates self.image
8,962
def add(self, data, conn_type, squash=True): if data in self.children: return data if not squash: self.children.append(data) return data if self.connector == conn_type: if (isinstance(data, QBase) and not data.negated and (data.connector == conn_type or len(data) == 1)): self.children.extend(data.children) return self else: self.children.append(data) return data else: obj = self._new_instance(self.children, self.connector, self.negated) self.connector = conn_type self.children = [obj, data] return data
Combine this tree and the data represented by data using the connector conn_type. The combine is done by squashing the node other away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of data regardless of whether the node other got squashed or not. If `squash` is False the data is prepared and added as a child to this tree without further logic. Args: conn_type (str, optional ["AND", "OR"]): connection method
8,963
def get_all_dhcp_options(self, dhcp_options_ids=None): params = {} if dhcp_options_ids: self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId') return self.get_list('DescribeDhcpOptions', params, [('item', DhcpOptions)])
Retrieve information about your DhcpOptions. :type dhcp_options_ids: list :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's :rtype: list :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
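A usage sketch with boto's legacy VPC connection (region and option-set ID are hypothetical):

import boto.vpc

conn = boto.vpc.connect_to_region('us-east-1')
all_opts = conn.get_all_dhcp_options()
one_opt = conn.get_all_dhcp_options(['dopt-12345678'])  # filter by ID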
8,964
def adopt(self, grab): self.load_config(grab.config) self.doc = grab.doc.copy(new_grab=self) for key in self.clonable_attributes: setattr(self, key, getattr(grab, key)) self.cookies = deepcopy(grab.cookies)
Copy the state of another `Grab` instance. Use case: create backup of current state to the cloned instance and then restore the state from it.
8,965
def dropdb(self, name): if self.readonly: raise s_exc.IsReadOnly() while True: try: if not self.dbexists(name): return db = self.initdb(name) self.dirty = True self.xact.drop(db.db, delete=True) self.forcecommit() return except lmdb.MapFullError: self._handle_mapfull()
Deletes an **entire database** (i.e. a table), losing all data.
8,966
def assign(self, node): child_object = self.translate(node.child) child_object.prefix = \ .format(name=node.name, attributes=.join(node.attributes.names)) return child_object
Translate an assign node into SQLQuery. :param node: a treebrd node :return: a SQLQuery object for the tree rooted at node
8,967
def refresh(self): if self.object_id is None: url = self.build_url(self._endpoints.get()) else: url = self.build_url( self._endpoints.get().format(id=self.object_id)) response = self.con.get(url) if not response: return False drive = response.json() self._update_data({self._cloud_data_key: drive}) return True
Updates this drive with data from the server :return: Success / Failure :rtype: bool
8,968
def do_EOF(self, args): if _debug: ConsoleCmd._debug("do_EOF %r", args) return self.do_exit(args)
Exit on system end of file character
8,969
def is_same_as(self, other_databox, headers=True, columns=True, header_order=True, column_order=True, ckeys=True): d = other_databox if not hasattr(other_databox, ): return False if headers: if not len(self.hkeys) == len(d.hkeys): return False if header_order and not self.hkeys == d.hkeys: return False for k in self.hkeys: if not k in d.hkeys: return False if ckeys: if column_order and not self.ckeys == d.ckeys: return False for k in self.ckeys: if not k in d.ckeys: return False if not (_n.array(self[k]) == _n.array(d[k])).all(): return False else: for n in range(len(self.ckeys)): if not (_n.array(self[n]) == _n.array(d[n])).all(): return False return True
Tests that the important (i.e. savable) information in this databox is the same as that of the other_databox. Parameters ---------- other_databox Databox with which to compare. headers=True Make sure all header elements match. columns=True Make sure every element of every column matches. header_order=True Whether the order of the header elements must match. column_order=True Whether the order of the columns must match. This is only a sensible concern if ckeys=True. ckeys=True Whether the actual ckeys matter, or just the ordered columns of data. Note the == symbol runs this function with everything True.
8,970
def is_allowed(func): @wraps(func) def _is_allowed(user, *args, **kwargs): password = kwargs.pop('password', None) if user.check_password(password): return func(user, *args, **kwargs) else: raise NotAllowedError() sig = inspect.signature(func) parms = list(sig.parameters.values()) parms.append(inspect.Parameter('password', inspect.Parameter.KEYWORD_ONLY, default=None)) _is_allowed.__signature__ = sig.replace(parameters=parms) return _is_allowed
Check user password, when is correct, then run decorated function. :returns: decorated function
8,971
def song(self): song = self._connection.request( , {: [-9, 9], : dict([(artist, ) for artist in self._artists]), : self._radio, : self._recent_artists, : self._connection.session.queue, : 0.75, : self._connection.session.country, : [110, 130], : self._songs_already_seen, : 1500, : 60, : []}, self._connection.header(, ))[1] return Song( song[], song[], song[], song[], song[], song[], song[], None, song[], None, self._connection)
:class:`Song` object of next song to play
8,972
def _predict(self, features): from sklearn.exceptions import NotFittedError try: prediction = self.kernel.predict_classes(features)[:, 0] except NotFittedError: raise NotFittedError( "{} is not fitted yet. Call with appropriate " "arguments before using this method.".format( type(self).__name__ ) ) return prediction
Predict matches and non-matches. Parameters ---------- features : numpy.ndarray The data to predict the class of. Returns ------- numpy.ndarray The predicted classes.
8,973
def to_categorical(y, nb_classes, num_classes=None): if num_classes is not None: if nb_classes is not None: raise ValueError("Should not specify both nb_classes and its deprecated " "alias, num_classes") warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`." " `num_classes` may be removed on or after 2019-04-23.") nb_classes = num_classes del num_classes y = np.array(y, dtype='int').ravel() n = y.shape[0] categorical = np.zeros((n, nb_classes)) categorical[np.arange(n), y] = 1 return categorical
Converts a class vector (integers) to binary class matrix. This is adapted from the Keras function with the same name. :param y: class vector to be converted into a matrix (integers from 0 to nb_classes). :param nb_classes: total number of classes. :param num_classes: deprecated alias of nb_classes :return: A binary matrix representation of the input.
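A worked example of the one-hot encoding:

labels = [0, 2, 1, 2]
print(to_categorical(labels, nb_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]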
8,974
def register_pubkey(self): p = pkcs_os2ip(self.dh_p) g = pkcs_os2ip(self.dh_g) pn = dh.DHParameterNumbers(p, g) y = pkcs_os2ip(self.dh_Ys) public_numbers = dh.DHPublicNumbers(y, pn) s = self.tls_session s.server_kx_pubkey = public_numbers.public_key(default_backend()) if not s.client_kx_ffdh_params: s.client_kx_ffdh_params = pn.parameters(default_backend())
XXX Check that the pubkey received is in the group.
8,975
def url_read_text(url, verbose=True): data = url_read(url, verbose) text = data.decode('utf8') return text
r""" Directly reads text data from url
8,976
def manage_service_check_result_brok(self, b): host_name = b.data.get(, None) service_description = b.data.get(, None) if not host_name or not service_description: return service_id = host_name+"/"+service_description logger.debug("service check result: %s", service_id) if not self.ignore_unknown and host_name not in self.hosts_cache: logger.warning("received service check result for an unknown host: %s", service_id) return if service_id not in self.services_cache and not self.ignore_unknown: logger.warning("received service check result for an unknown service: %s", service_id) return metrics = self.get_metrics_from_perfdata(service_description, b.data[]) if not metrics: logger.debug("no metrics to send ...") return if self.ignore_latency_limit >= b.data[] > 0: check_time = int(b.data[]) - int(b.data[]) else: check_time = int(b.data[]) hname = sanitize_name(host_name) if host_name in self.hosts_cache: if self.hosts_cache[host_name].get(, None): hname = ".".join((self.hosts_cache[host_name].get(), hname)) if self.hosts_cache[host_name].get(, None): hname = ".".join((self.hosts_cache[host_name].get(), hname)) desc = sanitize_name(service_description) if service_id in self.services_cache: if self.services_cache[service_id].get(, None): desc = ".".join((desc, self.services_cache[service_id].get(, None))) if self.graphite_data_source: path = .join((hname, self.graphite_data_source, desc)) else: path = .join((hname, desc)) if self.realms_prefix and self.hosts_cache[host_name].get(, None): path = .join((self.hosts_cache[host_name].get(), path)) realm_name = None if host_name in self.hosts_cache: realm_name = self.hosts_cache[host_name].get(, None) self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path)
A service check result brok has just arrived ...
8,977
def _get_qe(self, key, obj): if key in self._cached: return self._cached[key] qe = create_query_engine(obj, self._class) self._cached[key] = qe return qe
Instantiate a query engine, or retrieve a cached one.
8,978
def get_assessment_part_ids_by_banks(self, bank_ids):
    id_list = []
    for assessment_part in self.get_assessment_parts_by_banks(bank_ids):
        id_list.append(assessment_part.get_id())
    return IdList(id_list)
Gets the list of ``AssessmentPart Ids`` corresponding to a list of ``Banks``.

arg:    bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.id.IdList) - list of assessment part ``Ids``
raise:  NullArgument - ``bank_ids`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
8,979
def public_key_sec(self):
    if self.is_coinbase():
        return None
    opcodes = ScriptTools.opcode_list(self.script)
    if len(opcodes) == 2 and opcodes[0].startswith("[30"):
        sec = h2b(opcodes[1][1:-1])
        return sec
    return None
Return the public key as sec, or None in case of failure.
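For context, a self-contained sketch of the same extraction on a two-opcode scriptSig; the helper name and hex values are illustrative:

from binascii import unhexlify

def extract_sec(opcodes):
    # A pay-to-pubkey-hash scriptSig is [DER signature] [SEC pubkey];
    # DER signatures begin with byte 0x30, hence the "[30" prefix test.
    if len(opcodes) == 2 and opcodes[0].startswith("[30"):
        return unhexlify(opcodes[1][1:-1])
    return None

assert extract_sec(["[3044aabb]", "[02c0ffee]"]) == b'\x02\xc0\xff\xee'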
8,980
def read(self, filename=None):
    self._init_filename(filename)
    data = odict()
    with open(self.real_filename) as ndx:
        current_section = None
        for line in ndx:
            line = line.strip()
            if len(line) == 0:
                continue
            m = self.SECTION.match(line)
            if m:
                current_section = m.group('name')  # NOTE: group name reconstructed
                data[current_section] = []
                continue
            if current_section is not None:
                data[current_section].extend(map(int, line.split()))
    super(NDX, self).update(odict([(name, self._transform(atomnumbers))
                                   for name, atomnumbers in data.items()]))
Read and parse index file *filename*.
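For reference, a standalone sketch of the same parsing logic for the GROMACS .ndx format (bracketed section headers followed by whitespace-separated atom numbers):

import re

SECTION = re.compile(r"\s*\[\s*(?P<name>\S.*?\S)\s*\]\s*")

def parse_ndx(text):
    # Map section name -> list of atom numbers.
    data, current = {}, None
    for line in text.splitlines():
        line = line.strip()
        if not line:
            continue
        m = SECTION.match(line)
        if m:
            current = m.group('name')
            data[current] = []
        elif current is not None:
            data[current].extend(int(x) for x in line.split())
    return data

sample = "[ System ]\n1 2 3\n4 5\n[ Protein ]\n1 2\n"
assert parse_ndx(sample) == {'System': [1, 2, 3, 4, 5], 'Protein': [1, 2]}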
8,981
def draw(self):
    if self.hidden:
        return False
    if self.background_color is not None:
        render.fillrect(self.surface, self.background_color,
                        rect=pygame.Rect((0, 0), self.frame.size))
    for child in self.children:
        if not child.hidden:
            child.draw()
            topleft = child.frame.topleft
            if child.shadowed:
                shadow_size = theme.current.shadow_size
                shadow_topleft = (topleft[0] - shadow_size // 2,
                                  topleft[1] - shadow_size // 2)
                self.surface.blit(child.shadow_image, shadow_topleft)
            self.surface.blit(child.surface, topleft)
            if child.border_color and child.border_widths is not None:
                if (type(child.border_widths) is int and
                        child.border_widths > 0):
                    pygame.draw.rect(self.surface, child.border_color,
                                     child.frame, child.border_widths)
                else:
                    tw, lw, bw, rw = child.get_border_widths()
                    tl = (child.frame.left, child.frame.top)
                    tr = (child.frame.right - 1, child.frame.top)
                    bl = (child.frame.left, child.frame.bottom - 1)
                    br = (child.frame.right - 1, child.frame.bottom - 1)
                    if tw > 0:
                        pygame.draw.line(self.surface, child.border_color, tl, tr, tw)
                    if lw > 0:
                        pygame.draw.line(self.surface, child.border_color, tl, bl, lw)
                    if bw > 0:
                        pygame.draw.line(self.surface, child.border_color, bl, br, bw)
                    if rw > 0:
                        pygame.draw.line(self.surface, child.border_color, tr, br, rw)
    return True
Do not call directly.
8,982
def set(self, column, value, useMethod=True, **context):
    # NOTE: the 'locale' keyword test and the 'all' locale sentinel below
    # are reconstructed; treat those exact strings as assumptions.
    col = self.schema().column(column, raise_=False)
    if col is None:
        collector = self.schema().collector(column)
        if collector:
            my_context = self.context()
            for k, v in my_context.raw_values.items():
                if k not in orb.Context.QueryFields:
                    context.setdefault(k, v)
            sub_context = orb.Context(**context)
            method = collector.settermethod()
            if method and useMethod:
                return method(self, value, context=sub_context)
            else:
                records = self.get(collector.name(), context=sub_context)
                records.update(value, useMethod=useMethod, context=sub_context)
                self.__preload.pop(collector.name(), None)
                return records
        else:
            raise errors.ColumnNotFound(schema=self.schema(), column=column)
    elif col.testFlag(col.Flags.ReadOnly):
        raise errors.ColumnReadOnly(schema=self.schema(), column=column)
    context = self.context(**context)
    if useMethod:
        method = col.settermethod()
        if method:
            keywords = list(funcutil.extract_keywords(method))
            if 'locale' in keywords:
                return method(self, value, locale=context.locale)
            else:
                return method(self, value)
    if self.isRecord() and self.__delayed:
        self.__delayed = False
        self.read()
    with WriteLocker(self.__dataLock):
        orig, curr = self.__values.get(col.name(), (None, None))
        value = col.store(value, context)
        if col.testFlag(col.Flags.I18n) and isinstance(curr, dict) and isinstance(value, dict):
            new_value = curr.copy()
            new_value.update(value)
            value = new_value
        try:
            change = curr != value
        except TypeError:
            change = True
        if change:
            self.__values[col.name()] = (orig, value)
    if change:
        if col.testFlag(col.Flags.I18n) and context.locale != 'all':
            old_value = curr.get(context.locale) if isinstance(curr, dict) else curr
            new_value = value.get(context.locale) if isinstance(value, dict) else value
        else:
            old_value = curr
            new_value = value
        event = orb.events.ChangeEvent(record=self, column=col, old=old_value, value=new_value)
        if self.processEvent(event):
            self.onChange(event)
        if event.preventDefault:
            with WriteLocker(self.__dataLock):
                orig, _ = self.__values.get(col.name(), (None, None))
                self.__values[col.name()] = (orig, curr)
            return False
        else:
            return change
    else:
        return False
Sets the value for this record at the inputted column name. If the
column name provided doesn't exist within the schema, then the
ColumnNotFound error will be raised.

:param      column | <str>
            value  | <variant>

:return     <bool> changed
8,983
def dump_xearth_markers(markers, name='identifier'):
    # NOTE: the format strings below are reconstructed to match the
    # example output in the docstring.
    output = []
    for identifier, point in markers.items():
        line = ['%f %f ' % (point.latitude, point.longitude), ]
        if hasattr(point, 'name') and point.name:
            if name == 'identifier':
                line.append('"%s" # %s' % (identifier, point.name))
            elif name == 'name':
                line.append('"%s" # %s' % (point.name, identifier))
            elif name == 'comment':
                line.append('"%s" # %s' % (identifier, point.comment))
            else:
                raise ValueError('Unknown name type `%s`' % name)
            if hasattr(point, 'altitude') and point.altitude:
                line.append(', alt %im' % point.altitude)
        else:
            line.append('"%s"' % identifier)
        output.append(''.join(line))
    return sorted(output, key=lambda x: x.split()[2])
Generate an Xearth compatible marker file.

``dump_xearth_markers()`` writes a simple Xearth_ marker file from
a dictionary of :class:`trigpoints.Trigpoint` objects.

It expects a dictionary in one of the following formats. For support of
:class:`Trigpoint` that is::

    {500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"),
     501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"),
     505392: Trigpoint(51.910886, -0.186462, 136.0, "Sish Lane")}

And generates output of the form::

    52.066035 -0.281449 "500936" # Broom Farm, alt 37m
    52.010585 -0.173443 "501097" # Bygrave, alt 97m
    51.910886 -0.186462 "505392" # Sish Lane, alt 136m

Or similar to the following if the ``name`` parameter is set to
``name``::

    52.066035 -0.281449 "Broom Farm" # 500936 alt 37m
    52.010585 -0.173443 "Bygrave" # 501097 alt 97m
    51.910886 -0.186462 "Sish Lane" # 505392 alt 136m

Point objects should be provided in the following format::

    {"Broom Farm": Point(52.066035, -0.281449),
     "Bygrave": Point(52.010585, -0.173443),
     "Sish Lane": Point(51.910886, -0.186462)}

And generates output of the form::

    52.066035 -0.281449 "Broom Farm"
    52.010585 -0.173443 "Bygrave"
    51.910886 -0.186462 "Sish Lane"

Note:
    xplanet_ also supports xearth marker files, and as such can use the
    output from this function.

See also:
    upoints.xearth.Xearths.import_locations

Args:
    markers (dict): Dictionary of identifier keys, with
        :class:`Trigpoint` values
    name (str): Value to use as Xearth display string

Returns:
    list: List of strings representing an Xearth marker file

Raises:
    ValueError: Unsupported value for ``name``

.. _xearth: http://hewgill.com/xearth/original/
.. _xplanet: http://xplanet.sourceforge.net/
8,984
def git_checkout(repo_dir, ref, branch=None):
    # NOTE: the literal command tokens are reconstructed; the third
    # entry ('--quiet') is an assumption.
    command = ['git', 'checkout', '--quiet']
    if branch:
        command.extend(['-b', '{}'.format(branch)])
    command.append(ref)
    return execute_git_command(command, repo_dir=repo_dir)
Do a git checkout of `ref` in `repo_dir`. If branch is specified it should be the name of the new branch.
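A self-contained sketch of the same operation using subprocess directly (like the reconstruction above, the exact flags are a plausible choice rather than a confirmed one):

import subprocess

def git_checkout_subprocess(repo_dir, ref, branch=None):
    # Check out `ref`; optionally create branch `branch` at it first.
    command = ['git', 'checkout']
    if branch:
        command.extend(['-b', branch])
    command.append(ref)
    return subprocess.check_output(command, cwd=repo_dir)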
8,985
def setup(self, settings):
    # NOTE: settings keys reconstructed ('REDIS_HOST', 'REDIS_PORT', 'REDIS_DB').
    self.extract = tldextract.TLDExtract()
    self.redis_conn = redis.Redis(host=settings['REDIS_HOST'],
                                  port=settings['REDIS_PORT'],
                                  db=settings.get('REDIS_DB'))
    try:
        self.redis_conn.info()
        self.logger.debug("Connected to Redis in ZookeeperHandler")
    except ConnectionError:
        self.logger.error("Failed to connect to Redis in ZookeeperHandler")
        sys.exit(1)
Setup redis and tldextract
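A standalone sketch of the same fail-fast connection check (names are illustrative):

import sys
import redis

def connect_redis(host, port, db=0):
    # info() forces a round trip, so an unreachable server fails here
    # instead of on the first real command.
    conn = redis.Redis(host=host, port=port, db=db)
    try:
        conn.info()
    except redis.exceptions.ConnectionError:
        sys.exit(1)
    return conn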
8,986
def result(self):
    # Python 2 style sort; the cmp argument was removed in Python 3.
    self.__result.sort(cmp=self.__cmp, key=self.__key, reverse=self.__reverse)
    return self.__result
Formats the result.
8,987
def make_opfields(cls):
    opfields = {}
    for opname in SERIALIZE_FIELDS.keys():
        opcode = NAME_OPCODES[opname]
        opfields[opcode] = SERIALIZE_FIELDS[opname]
    return opfields
Calculate the virtualchain-required opfields dict.
8,988
def allowed_values(self):
    data = clips.data.DataObject(self._env)
    lib.EnvSlotAllowedValues(self._env, self._cls, self._name, data.byref)
    return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the allowed values for this Slot. The Python equivalent of the CLIPS slot-allowed-values function.
8,989
def get_next(self, label):
    while self._get_current_label() != label:
        self._skip_section()
    return self._read_section()
Get the next section with the given label
8,990
def to_array(self):
    array = super(Chat, self).to_array()
    array['id'] = int(self.id)
    array['type'] = u(self.type)
    if self.title is not None:
        array['title'] = u(self.title)
    if self.username is not None:
        array['username'] = u(self.username)
    if self.first_name is not None:
        array['first_name'] = u(self.first_name)
    if self.last_name is not None:
        array['last_name'] = u(self.last_name)
    if self.all_members_are_administrators is not None:
        array['all_members_are_administrators'] = bool(self.all_members_are_administrators)
    if self.photo is not None:
        array['photo'] = self.photo.to_array()
    if self.description is not None:
        array['description'] = u(self.description)
    if self.invite_link is not None:
        array['invite_link'] = u(self.invite_link)
    if self.pinned_message is not None:
        array['pinned_message'] = self.pinned_message.to_array()
    if self.sticker_set_name is not None:
        array['sticker_set_name'] = u(self.sticker_set_name)
    if self.can_set_sticker_set is not None:
        array['can_set_sticker_set'] = bool(self.can_set_sticker_set)
    return array
Serializes this Chat to a dictionary.

:return: dictionary representation of this object.
:rtype: dict
8,991
def create_hosted_zone(self, name, caller_reference=None, comment=None):
    # NOTE: the 'hostedzone' path and 'POST' method strings are
    # reconstructed from this library's request conventions.
    body = xml_generators.create_hosted_zone_writer(
        connection=self,
        name=name,
        caller_reference=caller_reference,
        comment=comment,
    )
    root = self._send_request(
        path='hostedzone',
        data=body,
        method='POST',
    )
    return xml_parsers.created_hosted_zone_parser(
        root=root,
        connection=self,
    )
Creates and returns a new hosted zone. Once a hosted zone is created,
its details can't be changed.

:param str name: The name of the hosted zone to create.
:keyword str caller_reference: A unique string that identifies the
    request and that allows failed create_hosted_zone requests to be
    retried without the risk of executing the operation twice. If no
    value is given, we'll generate a Type 4 UUID for you.
:keyword str comment: An optional comment to attach to the zone.
:rtype: tuple
:returns: A tuple in the form of ``(hosted_zone, change_info)``. The
    ``hosted_zone`` variable contains a :py:class:`HostedZone
    <route53.hosted_zone.HostedZone>` instance matching the newly
    created zone, and ``change_info`` is a dict with some details about
    the API request.
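A hedged usage sketch; `conn` is assumed to be an already-authenticated connection object, and the change_info key shown is typical of this library's parsed responses rather than guaranteed:

zone, change_info = conn.create_hosted_zone(
    'example.com.', comment='zone managed via the API')
print(zone.id, change_info['request_status'])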
8,992
def i18n_install(lc=None):
    # NOTE: the log messages and the installed names list below are
    # reconstructed assumptions; the original string literals were stripped.
    log.debug('i18n_install(lc={lc})'.format(lc=lc))
    if lc is None:
        lc = i18n_system_locale()
    if lc is None:
        log.debug('no usable locale, installing a NullTranslations translator')
        translator = gettext.NullTranslations()
    else:
        child_locales = i18n_support_locale(lc)
        log.debug('gettext.translation(domain={domain}, localedir={localedir}, '
                  'languages={languages}, fallback={fallback})'.format(
                      domain=project.PROJECT_TITLE.lower(),
                      localedir=i18n_get_path(),
                      languages=child_locales,
                      fallback=True))
        translator = gettext.translation(
            domain=project.PROJECT_TITLE.lower(),
            localedir=str(i18n_get_path()),
            languages=child_locales,
            fallback=True)
    translator.install(names=['ngettext'])
Install internationalization support for the clients using the
specified locale. If there is no support for the locale, the default
locale will be used. As last resort, a null translator will be
installed.

:param lc: locale to install. If None, the system default locale
    will be used.
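A minimal standalone sketch of the gettext pattern used above (domain and directory are illustrative):

import gettext

# fallback=True returns NullTranslations when no catalog is found,
# so _() degrades to the identity function.
translator = gettext.translation(
    domain='myapp', localedir='locale', languages=['de'], fallback=True)
translator.install()
print(_('Hello'))  # install() injects '_' into builtins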
8,993
def from_long(self, number):
    if not isinstance(number, baseinteger):
        raise TypeError("number can only be an instance of type baseinteger")
    self._call("fromLong", in_p=[number])
Make PCI address from long.

in number of type int
8,994
def buckets_insert(self, bucket, project_id=None):
    # NOTE: the 'project' and 'name' keys are reconstructed from the
    # GCS JSON API request shape.
    args = {'project': project_id if project_id else self._project_id}
    data = {'name': bucket}
    url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
    return datalab.utils.Http.request(url, args=args, data=data,
                                      credentials=self._credentials)
Issues a request to create a new bucket.

Args:
    bucket: the name of the bucket.
    project_id: the project to use when inserting the bucket.
Returns:
    A parsed bucket information dictionary.
Raises:
    Exception if there is an error performing the operation.
8,995
def get_route(self, file_id):
    # NOTE: quoted strings below are reconstructed; the Bot API method
    # used for file routes is getFile.
    title = '%s.get_route' % self.__class__.__name__
    input_fields = {
        'file_id': file_id,
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    request_kwargs = {
        'url': '%s/getFile' % self.api_endpoint,
        'params': {
            'file_id': file_id,
        },
    }
    response_details = self._post_request(**request_kwargs)
    return response_details
A method to retrieve route information for a file on the Telegram API.

:param file_id: string with id of file in a message sent to the bot
:return: dictionary of response details with route details in [json][result]
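For context, a direct sketch of the underlying Bot API call (getFile is the documented method; the helper name is illustrative):

import requests

def telegram_get_file(token, file_id):
    # getFile returns metadata including a file_path usable for download.
    url = 'https://api.telegram.org/bot%s/getFile' % token
    resp = requests.post(url, data={'file_id': file_id})
    return resp.json()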
8,996
def _equivalent_node_iterator_helper(self, node: BaseEntity,
                                     visited: Set[BaseEntity]) -> Iterable[BaseEntity]:
    # This is a generator, so the return annotation is typing.Iterable
    # rather than the bare BaseEntity the original declared.
    for v in self[node]:
        if v in visited:
            continue
        if self._has_no_equivalent_edge(node, v):
            continue
        visited.add(v)
        yield v
        yield from self._equivalent_node_iterator_helper(v, visited)
Iterate over nodes and their data that are equal to the given node, starting with the original.
8,997
def monte_carlo_vol(self, ndraws=10000, rstate=None, return_overlap=True):
    if rstate is None:
        rstate = np.random
    # Sample points and keep their proposal multiplicities `q`.
    samples = [self.sample(rstate=rstate, return_q=True)
               for i in range(ndraws)]
    qsum = sum([q for (x, idx, q) in samples])
    vol = 1. * ndraws / qsum * self.vol_tot
    if return_overlap:
        # Fraction of (multiplicity-weighted) draws inside the unit cube.
        qin = sum([q * unitcheck(x) for (x, idx, q) in samples])
        overlap = 1. * qin / qsum
        return vol, overlap
    else:
        return vol
Using `ndraws` Monte Carlo draws, estimate the volume of the *union* of ellipsoids. If `return_overlap=True`, also returns the estimated fractional overlap with the unit cube.
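The estimator above is plain Monte Carlo: volume is roughly the weighted acceptance fraction times a reference volume. A toy sketch of the same idea, estimating the unit circle's area from a bounding square:

import numpy as np

rng = np.random.RandomState(0)
pts = rng.uniform(-1, 1, size=(100000, 2))
inside = (pts ** 2).sum(axis=1) <= 1.0
area = inside.mean() * 4.0  # the bounding square has area 4
# area comes out close to pi (~3.14)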
8,998
def read_wave(path):
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1
        sample_width = wf.getsampwidth()
        assert sample_width == 2
        sample_rate = wf.getframerate()
        assert sample_rate in (8000, 16000, 32000)
        frames = wf.getnframes()
        pcm_data = wf.readframes(frames)
        duration = frames / sample_rate
        return pcm_data, sample_rate, duration
Reads a .wav file.

Takes the path, and returns (PCM audio data, sample rate, duration).
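A matching sketch that produces a mono 16-bit file read_wave can consume (tone parameters are arbitrary):

import math
import struct
import wave

def write_sine_wave(path, sample_rate=16000, seconds=1):
    # Write a 440 Hz mono 16-bit PCM sine tone.
    with wave.open(path, 'wb') as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(sample_rate)
        n = sample_rate * seconds
        frames = b''.join(
            struct.pack('<h', int(32767 * 0.5 *
                                  math.sin(2 * math.pi * 440 * i / sample_rate)))
            for i in range(n))
        wf.writeframes(frames)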
8,999
def bytes_available(device):
    bytes_avail = 0
    if isinstance(device, alarmdecoder.devices.SerialDevice):
        if hasattr(device._device, "in_waiting"):
            bytes_avail = device._device.in_waiting
        else:
            bytes_avail = device._device.inWaiting()
    elif isinstance(device, alarmdecoder.devices.SocketDevice):
        bytes_avail = 4096
    return bytes_avail
Determines the number of bytes available for reading from an
AlarmDecoder device.

:param device: the AlarmDecoder device
:type device: :py:class:`~alarmdecoder.devices.Device`

:returns: int