Dataset preview: 389k rows with columns Unnamed: 0 (int64, 0 to 389k), code (string, lengths 26 to 79.6k), and docstring (string, lengths 1 to 46.9k).
def init_app(self, app):
    provider = app.config.get("STORAGE_PROVIDER", None)
    key = app.config.get("STORAGE_KEY", None)
    secret = app.config.get("STORAGE_SECRET", None)
    container = app.config.get("STORAGE_CONTAINER", None)
    allowed_extensions = app.config.get("STORAGE_ALLOWED_EXTENSIONS", None)
    serve_files = app.config.get("STORAGE_SERVER", True)
    serve_files_url = app.config.get("STORAGE_SERVER_URL", "files")
    self.config["serve_files"] = serve_files
    self.config["serve_files_url"] = serve_files_url
    if not provider:
        # error message text assumed; the check is on STORAGE_PROVIDER
        raise ValueError("STORAGE_PROVIDER is missing")
    if provider.upper() == "LOCAL":
        if not os.path.isdir(container):
            raise IOError("Local Container (directory) %s is not a "
                          "directory or doesn't exist for LOCAL provider" % container)
    self.__init__(provider=provider,
                  key=key,
                  secret=secret,
                  container=container,
                  allowed_extensions=allowed_extensions)
    self._register_file_server(app)
To initiate with Flask :param app: Flask object :return:
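A minimal usage sketch for the extension above. The class name Storage and the provider value are illustrative; the STORAGE_* keys are the ones read by init_app:

    from flask import Flask

    app = Flask(__name__)
    app.config.update(
        STORAGE_PROVIDER="LOCAL",          # provider backend
        STORAGE_CONTAINER="/tmp/uploads",  # must be an existing directory for LOCAL
        STORAGE_ALLOWED_EXTENSIONS=["jpg", "png", "pdf"],
        STORAGE_SERVER=True,               # serve stored files back through the app
        STORAGE_SERVER_URL="files",
    )

    storage = Storage()    # hypothetical extension class exposing init_app()
    storage.init_app(app)  # raises if the provider is missing or the LOCAL container is invalid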
19,801
def add_watch_point(self, string, rating, importance=5):
    # dictionary keys inferred from the parameter names
    d = {}
    d['string'] = string
    d['rating'] = rating
    d['importance'] = importance
    self.watch_points.append(d)
For a log session you can add as many watch points which are used in the aggregation and extraction of key things that happen. Each watch point has a rating (up to you and can range from success to total failure and an importance for finer control of display
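A short usage sketch, assuming session is the log-session object that defines add_watch_point; the rating strings are illustrative:

    session.add_watch_point("connected to database", rating="success")
    session.add_watch_point("retrying request", rating="warning", importance=3)
    session.add_watch_point("upload failed", rating="total failure", importance=9)
    # each call appends {'string': ..., 'rating': ..., 'importance': ...} to session.watch_points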
19,802
def temporarily_enabled(self):
    old_setting = self.options.enabled
    self.enable()
    try:
        yield
    finally:
        self.options.enabled = old_setting
Temporarily enable the cache (useful for testing)
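Since the method yields, it is presumably decorated as a context manager in the original source. A usage sketch; cache and expensive_lookup are placeholders:

    cache.options.enabled = False          # cache disabled by default in this scenario

    with cache.temporarily_enabled():
        result = expensive_lookup("key")   # runs with the cache turned on

    assert cache.options.enabled is False  # previous setting restored on exit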
19,803
def credential_delete(self, *ids):
    return self.raw_query("credential", "delete", data={
        "credentials": [{"id": str(id)} for id in ids]
    })
Delete one or more credentials. :param ids: one or more credential ids
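A usage sketch, assuming sc is the API client instance that defines credential_delete; the ids are illustrative:

    # delete a single credential
    sc.credential_delete(42)

    # delete several credentials in one call; ids are sent as strings in the payload
    sc.credential_delete(42, 43, 44)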
19,804
def add_house(self, complex: str, **kwargs): self.check_complex(complex) self.post(.format(developer=self.developer, complex=complex), data=kwargs)
Add a new house to the rumetr db
19,805
def guinieranalysis(samplenames, qranges=None, qmax_from_shanum=True, prfunctions_postfix=, dist=None, plotguinier=True, graph_extension=, dmax=None, dmax_from_shanum=False): figpr = plt.figure() ip = get_ipython() axpr = figpr.add_subplot(1, 1, 1) if qranges is None: qranges = {: (0, 1000000)} if dmax is None: dmax = {: None} if not in qranges: qranges[] = (0, 1000000) if not in dmax: dmax[] = None table_autorg = [[, , , , , , , , , , ]] table_gnom = [[, , , , , , , , ]] results = {} for sn in samplenames: if sn not in qranges: print(.format(sn)) qrange = qranges[] else: qrange = qranges[sn] if sn not in dmax: dmax_ = dmax[] else: dmax_ = dmax[sn] print(.format(sn, qrange[0], qrange[1])) curve = getsascurve(sn, dist)[0].trim(*qrange).sanitize() curve.save(sn + ) try: Rg, I0, qmin, qmax, quality, aggregation = autorg(sn + ) except ValueError: print( % sn) continue dmax_shanum, nsh, nopt, qmaxopt = shanum(sn + ) if qmax_from_shanum: curve_trim = curve.trim(qmin, qmaxopt) else: curve_trim = curve.trim(qmin, qrange[1]) if dmax_from_shanum: dmax_ = dmax_from_shanum curve_trim.save(sn + ) if dmax_ is None: print(.format( sn, Rg.val, curve_trim.q.min(), curve_trim.q.max())) gnompr, metadata = datgnom(sn + , Rg=Rg.val, noprint=True) else: print(.format( sn, dmax_, curve_trim.q.min(), curve_trim.q.max())) gnompr, metadata = gnom(curve_trim, dmax_) rg, i0, vporod = datporod(sn + ) axpr.errorbar(gnompr[:, 0], gnompr[:, 1], gnompr[:, 2], None, label=sn) if plotguinier: figsample = plt.figure() axgnomfit = figsample.add_subplot(1, 2, 1) curve.errorbar(, axes=axgnomfit, label=) axgnomfit.errorbar(metadata[], metadata[], metadata[], None, , label=) axgnomfit.loglog(metadata[], metadata[], , label=) figsample.suptitle(sn) axgnomfit.set_xlabel() axgnomfit.set_ylabel() axgnomfit.axvline(qmaxopt, 0, 1, linestyle=, color=, lw=2) axgnomfit.grid(True, which=) axgnomfit.axis() axgnomfit.legend(loc=) axguinier = figsample.add_subplot(1, 2, 2) axguinier.errorbar(curve.q, curve.Intensity, curve.Error, curve.qError, , label=) q = np.linspace(qmin, qmax, 100) axguinier.plot(q, I0.val * np.exp(-q ** 2 * Rg.val ** 2 / 3), label=) axguinier.plot(q, metadata[].val * np.exp(-q ** 2 * metadata[].val ** 2 / 3), label=) axguinier.set_xscale(, exponent=2) axguinier.set_yscale() axguinier.set_xlabel() axguinier.set_ylabel() axguinier.legend(loc=) idxmin = np.arange(len(curve))[curve.q <= qmin].max() idxmax = np.arange(len(curve))[curve.q >= qmax].min() idxmin = max(0, idxmin - 5) idxmax = min(len(curve) - 1, idxmax + 5) if plotguinier: curveguinier = curve.trim(curve.q[idxmin], curve.q[idxmax]) axguinier.axis(xmax=curve.q[idxmax], xmin=curve.q[idxmin], ymin=curveguinier.Intensity.min(), ymax=curveguinier.Intensity.max()) axguinier.grid(True, which=) table_gnom.append( [sn, metadata[].tostring(extra_digits=2), metadata[].tostring(extra_digits=2), metadata[], metadata[], metadata[], metadata[], metadata[], vporod]) table_autorg.append([sn, Rg.tostring(extra_digits=2), I0, % qmin, % qmax, qmin * Rg, qmax * Rg, % (quality * 100), aggregation, % dmax_shanum, % qmaxopt]) if plotguinier: figsample.tight_layout() figsample.savefig(os.path.join(ip.user_ns[], % (sn, graph_extension)), dpi=600) results[sn] = { : Rg, : I0, : qmin, : qmax, : quality, : aggregation, : dmax_shanum, : qmaxopt, : metadata[], : metadata[], : metadata[], : metadata[], : metadata[], : metadata[], : vporod, } axpr.set_xlabel() axpr.set_ylabel() axpr.legend(loc=) axpr.grid(True, which=) writemarkdown() tab = ipy_table.IpyTable(table_autorg) tab.apply_theme() 
display(tab) writemarkdown() tab = ipy_table.IpyTable(table_gnom) tab.apply_theme() if prfunctions_postfix and prfunctions_postfix[0] != : prfunctions_postfix = + prfunctions_postfix figpr.tight_layout() figpr.savefig(os.path.join(ip.user_ns[], % (prfunctions_postfix, graph_extension)), dpi=600) display(tab) return results
Perform Guinier analysis on the samples. Inputs: samplenames: list of sample names qranges: dictionary of q ranges for each sample. The keys are sample names. The special '__default__' key corresponds to all samples which do not have a key in the dict. qmax_from_shanum: use the qmax determined by the shanum program for the GNOM input. prfunctions_postfix: The figure showing the P(r) functions will be saved as prfunctions_<prfunctions_postfix><graph_extension> dist: the sample-to-detector distance to use. plotguinier: if Guinier plots are needed. graph_extension: the extension of the saved graph image files. dmax: Dict of Dmax parameters. If not found or None, determine automatically using DATGNOM. If found, GNOM is used. The special key '__default__' works in a similar fashion as for `qranges`.
19,806
def iter_packages(self, name, range_=None, paths=None):
    for package in iter_packages(name, range_, paths):
        if not self.excludes(package):
            yield package
Same as iter_packages in packages.py, but also applies this filter. Args: name (str): Name of the package, eg 'maya'. range_ (VersionRange or str): If provided, limits the versions returned to those in `range_`. paths (list of str, optional): paths to search for packages, defaults to `config.packages_path`. Returns: `Package` iterator.
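A usage sketch, assuming pkg_filter is an instance of the filter class shown above; the package name and version range are illustrative:

    # iterate over 'maya' packages that survive the filter
    for package in pkg_filter.iter_packages("maya", range_="2018+"):
        print(package)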
19,807
def bind(_self, **kwargs):
    return Logger(
        {**_self._extra, **kwargs},
        _self._exception,
        _self._record,
        _self._lazy,
        _self._ansi,
        _self._raw,
        _self._depth,
    )
Bind attributes to the ``extra`` dict of each logged message record. This is used to add custom context to each logging call. Parameters ---------- **kwargs Mapping between keys and values that will be added to the ``extra`` dict. Returns ------- :class:`~Logger` A logger wrapping the core logger, but which sends record with the customized ``extra`` dict. Examples -------- >>> logger.add(sys.stderr, format="{extra[ip]} - {message}") 1 >>> class Server: ... def __init__(self, ip): ... self.ip = ip ... self.logger = logger.bind(ip=ip) ... def call(self, message): ... self.logger.info(message) ... >>> instance_1 = Server("192.168.0.200") >>> instance_2 = Server("127.0.0.1") >>> instance_1.call("First instance") 192.168.0.200 - First instance >>> instance_2.call("Second instance") 127.0.0.1 - Second instance
19,808
def create_record(self, type, name, data, priority=None, port=None,
                  weight=None, **kwargs):
    api = self.doapi_manager
    data = {
        "type": type,
        "name": name,
        "data": data,
        "priority": priority,
        "port": port,
        "weight": weight,
    }
    data.update(kwargs)
    # HTTP method assumed to be 'POST', since this call creates a new resource
    return self._record(api.request(self.record_url, method='POST',
                                    data=data)["domain_record"])
Add a new DNS record to the domain :param str type: the type of DNS record to add (``"A"``, ``"CNAME"``, etc.) :param str name: the name (hostname, alias, etc.) of the new record :param str data: the value of the new record :param int priority: the priority of the new record (SRV and MX records only) :param int port: the port that the service is accessible on (SRV records only) :param int weight: the weight of records with the same priority (SRV records only) :param kwargs: additional fields to include in the API request :return: the new domain record :rtype: DomainRecord :raises DOAPIError: if the API endpoint replies with an error
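A usage sketch, assuming domain is the domain object exposing create_record; addresses and hostnames are illustrative:

    # simple A record
    record = domain.create_record("A", "www", "203.0.113.10")

    # SRV record: priority, port and weight only apply to SRV (priority also to MX)
    srv = domain.create_record("SRV", "_sip._tcp", "sip.example.com",
                               priority=10, port=5060, weight=5)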
19,809
def _get_dimension_scales(self, dimension, preserve_domain=False):
    # metadata key assumed to be 'dimension', matching the argument name
    if preserve_domain:
        return [
            self.scales[k] for k in self.scales if (
                k in self.scales_metadata
                and self.scales_metadata[k].get('dimension') == dimension
                and not self.preserve_domain.get(k)
            )
        ]
    else:
        return [
            self.scales[k] for k in self.scales if (
                k in self.scales_metadata
                and self.scales_metadata[k].get('dimension') == dimension
            )
        ]
Return the list of scales corresponding to a given dimension. The preserve_domain optional argument specifies whether one should filter out the scales for which preserve_domain is set to True.
19,810
def tool_classpath_from_products(products, key, scope): callback_product_map = products.get_data() or {} callback = callback_product_map.get(scope, {}).get(key) if not callback: raise TaskError( .format(key=key, scope=scope)) return callback()
Get a classpath for the tool previously registered under key in the given scope. :param products: The products of the current pants run. :type products: :class:`pants.goal.products.Products` :param string key: The key the tool configuration was registered under. :param string scope: The scope the tool configuration was registered under. :returns: A list of paths. :rtype: list
19,811
def nvmlDeviceGetMultiGpuBoard(handle):
    c_multiGpu = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard")
    ret = fn(handle, byref(c_multiGpu))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_multiGpu.value)
r""" /** * Retrieves whether the device is on a Multi-GPU Board * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param multiGpuBool Reference in which to return a zero or non-zero value * to indicate whether the device is on a multi GPU board * * @return * - \ref NVML_SUCCESS if \a multiGpuBool has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard
19,812
def idle_send_acks_and_nacks(self): max_blocks_to_send = 10 blocks_sent = 0 i = 0 now = time.time() while (i < len(self.blocks_to_ack_and_nack) and blocks_sent < max_blocks_to_send): stuff = self.blocks_to_ack_and_nack[i] [master, block, status, first_sent, last_sent] = stuff if status == 1: mavstatus = mavutil.mavlink.MAV_REMOTE_LOG_DATA_BLOCK_ACK (target_sys, target_comp) = self.sender self.master.mav.remote_log_block_status_send(target_sys, target_comp, block, mavstatus) blocks_sent += 1 del self.acking_blocks[block] del self.blocks_to_ack_and_nack[i] continue if block not in self.missing_blocks: del self.blocks_to_ack_and_nack[i] continue if (self.last_seqno - block > 200) or (now - first_sent > 60): if self.log_settings.verbose: print("DFLogger: Abandoning block (%d)" % (block,)) del self.blocks_to_ack_and_nack[i] del self.missing_blocks[block] self.abandoned += 1 continue i += 1 if last_sent is not None: if now - last_sent < 0.1: continue if self.log_settings.verbose: print("DFLogger: Asking for block (%d)" % (block,)) mavstatus = mavutil.mavlink.MAV_REMOTE_LOG_DATA_BLOCK_NACK (target_sys, target_comp) = self.sender self.master.mav.remote_log_block_status_send(target_sys, target_comp, block, mavstatus) blocks_sent += 1 stuff[4] = now
Send packets to UAV in idle loop
19,813
def add_dashboard_panel(self, dashboard, name, panel_type, metrics, scope=None, sort_by=None, limit=None, layout=None): panel_configuration = { : name, : None, : None, : [], : { : 1, : 1, : 12, : 6 } } if panel_type == : verify=self.ssl_verify) return self._request_result(res)
**Description** Adds a panel to the dashboard. A panel can be a time series, or a top chart (i.e. bar chart), or a number panel. **Arguments** - **dashboard**: dashboard to edit - **name**: name of the new panel - **panel_type**: type of the new panel. Valid values are: ``timeSeries``, ``top``, ``number`` - **metrics**: a list of dictionaries, specifying the metrics to show in the panel, and optionally, if there is only one metric, a grouping key to segment that metric by. A metric is any of the entries that can be found in the *Metrics* section of the Explore page in Sysdig Monitor. Metric entries require an *aggregations* section specifying how to aggregate the metric across time and groups of containers/hosts. A grouping key is any of the entries that can be found in the *Show* or *Segment By* sections of the Explore page in Sysdig Monitor. Refer to the examples section below for ready to use code snippets. Note, certain panels allow certain combinations of metrics and grouping keys: - ``timeSeries``: 1 or more metrics OR 1 metric + 1 grouping key - ``top``: 1 or more metrics OR 1 metric + 1 grouping key - ``number``: 1 metric only - **scope**: filter to apply to the panel; must be based on metadata available in Sysdig Monitor; Example: *kubernetes.namespace.name='production' and container.image='nginx'*. - **sort_by**: Data sorting; The parameter is optional and it's a dictionary of ``metric`` and ``mode`` (it can be ``desc`` or ``asc``) - **limit**: This parameter sets the limit on the number of lines/bars shown in a ``timeSeries`` or ``top`` panel. In the case of more entities being available than the limit, the top entities according to the sort will be shown. The default value is 10 for ``top`` panels (for ``timeSeries`` the default is defined by Sysdig Monitor itself). Note that increasing the limit above 10 is not officially supported and may cause performance and rendering issues - **layout**: Size and position of the panel. The dashboard layout is defined by a grid of 12 columns, each row height is equal to the column height. For example, say you want to show 2 panels at the top: one panel might be 6 x 3 (half the width, 3 rows height) located in row 1 and column 1 (top-left corner of the viewport), the second panel might be 6 x 3 located in row 1 and position 7. The location is specified by a dictionary of ``row`` (row position), ``col`` (column position), ``size_x`` (width), ``size_y`` (height). **Success Return Value** A dictionary showing the details of the edited dashboard. **Example** `examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
19,814
def get_build_output(self, process): while True: output = process.stdout.readline() if output == b and process.poll() is not None: if process.returncode > 0: raise Exception("Compilation ended with an error" ".\nSTDERR\n%s\nSTDOUT\n%s" % (process.stderr.read(), process.stdout.read())) return if output: matches = re.search(r, output.strip().decode()) if matches is not None: yield [int(matches.group(1)), int(matches.group(2))]
Parse the output of the ns-3 build process to extract the information that is needed to draw the progress bar. Args: process: the subprocess instance to listen to.
19,815
def _mkOp(fn): def op(*operands, key=None) -> RtlSignalBase: assert operands, operands top = None if key is not None: operands = map(key, operands) for s in operands: if top is None: top = s else: top = fn(top, s) return top return op
Function to create variadic operator function :param fn: function to perform binary operation
19,816
def main(): parser = argparse.ArgumentParser(description=) parser.add_argument(, , type=int, help=, default=5556) parser.add_argument(, , help=, nargs=) parser.add_argument(, , type=str, help=) args = parser.parse_args() if args.config: _add_devices_from_config(args) if args.default and not add(, args.default): exit() app.run(host=, port=args.port)
Set up the server.
19,817
def _set_vni_any(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="vni-any", rest_name="vni-any", parent=self, choice=(u, u), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "empty", : , }) self.__vni_any = t if hasattr(self, ): self._set()
Setter method for vni_any, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/vni_any (empty) If this variable is read-only (config: false) in the source YANG file, then _set_vni_any is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vni_any() directly.
19,818
def transpose_func(classes, table):
    transposed_table = table
    for i, item1 in enumerate(classes):
        for j, item2 in enumerate(classes):
            if i > j:
                temp = transposed_table[item1][item2]
                transposed_table[item1][item2] = transposed_table[item2][item1]
                transposed_table[item2][item1] = temp
    return transposed_table
Transpose table. :param classes: classes :type classes : list :param table: input matrix :type table : dict :return: transposed table as dict
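A small worked example; note that because transposed_table is bound to the same dict object, the input table is modified in place:

    classes = ["cat", "dog"]
    table = {
        "cat": {"cat": 5, "dog": 2},
        "dog": {"cat": 1, "dog": 7},
    }
    result = transpose_func(classes, table)
    # off-diagonal entries are swapped:
    # result == {"cat": {"cat": 5, "dog": 1}, "dog": {"cat": 2, "dog": 7}}
    # and result is table (the same object)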
19,819
def get_next_action(self, request, application, label, roles): if label is not None: return HttpResponseBadRequest("<h1>Bad Request</h1>") actions = self.get_actions(request, application, roles) if request.method == "GET": context = self.context context.update({ : application, : actions, : self.name, : roles}) return render( template_name=, context=context, request=request) elif request.method == "POST": for action in actions: if action in request.POST: return action return HttpResponseBadRequest("<h1>Bad Request</h1>")
Django view method. We provide a default detail view for applications.
19,820
def node_done(self, ssid=None): if self.api_key is None: raise exceptions.ApiKeyRequired if ssid is None: raise exceptions.SsidRequired requested_hosts = dict() for host in self.self_inventory: if ssid == self.self_inventory[host][]: requested_hosts[host] = self.full_inventory[host] args = "key={key}&ssid={ssid}".format(key=self.api_key, ssid=ssid) resp, body = self.get( % args) return requested_hosts
Release the servers for the specified ssid. The API doesn't provide any kind of output, try to be helpful by providing the list of servers to be released. :param ssid: ssid of the server pool :return: [ requested_hosts ]
19,821
def rowsAboutToBeRemoved(self, parent, start, end):
    self._viewIsDirty = True
    super(StimulusView, self).rowsAboutToBeRemoved(parent, start, end)
Marks view for repaint. :qtdoc:`Re-implemented<QAbstractItemView.rowsAboutToBeRemoved>`
19,822
def _validate_tileset(self, tileset): if not in tileset: tileset = "{0}.{1}".format(self.username, tileset) pattern = if not re.match(pattern, tileset, flags=re.IGNORECASE): raise ValidationError( .format( tileset, pattern)) return tileset
Validate the tileset name and ensure that it includes the username
19,823
def take(self, obj): cached = self._thread_local.cache[self._get_cache_key(obj)] build_kwargs = {} if in cached and in cached: build_kwargs[] = cached[].objects.filter(pk__in=cached[]) elif in cached: if cached[].__class__.objects.filter(pk=cached[].pk).exists(): build_kwargs[] = cached[] else: build_kwargs[] = cached[].__class__.objects.none() self._clean_cache(obj) return build_kwargs
Get cached value and clean cache.
19,824
def send(self, msg, timeout=None): try: timestamp = struct.pack(, int(msg.timestamp * 1000)) except struct.error: raise ValueError() try: a_id = struct.pack(, msg.arbitration_id) except struct.error: raise ValueError() byte_msg = bytearray() byte_msg.append(0xAA) for i in range(0, 4): byte_msg.append(timestamp[i]) byte_msg.append(msg.dlc) for i in range(0, 4): byte_msg.append(a_id[i]) for i in range(0, msg.dlc): byte_msg.append(msg.data[i]) byte_msg.append(0xBB) self.ser.write(byte_msg)
Send a message over the serial device. :param can.Message msg: Message to send. .. note:: Flags like ``extended_id``, ``is_remote_frame`` and ``is_error_frame`` will be ignored. .. note:: If the timestamp is a float value it will be converted to an integer. :param timeout: This parameter will be ignored. The timeout value of the channel is used instead.
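A usage sketch with python-can, assuming the class above is registered as the serial interface; the port name is illustrative and the exact Bus constructor arguments may differ between python-can versions:

    import can

    # the serial backend packs timestamp (4 bytes), dlc, arbitration id (4 bytes) and data
    bus = can.interface.Bus(bustype="serial", channel="/dev/ttyUSB0")
    msg = can.Message(arbitration_id=0x123,
                      data=[0x11, 0x22, 0x33, 0x44],
                      is_extended_id=False)
    bus.send(msg)  # flags such as is_remote_frame are ignored by this backend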
19,825
def has_mixture_channel(val: Any) -> bool: mixture_getter = getattr(val, , None) result = NotImplemented if mixture_getter is None else mixture_getter() if result is not NotImplemented: return result result = has_unitary(val) if result is not NotImplemented and result: return result return mixture_channel(val, None) is not None
Returns whether the value has a mixture channel representation. In contrast to `has_mixture`, this method falls back to checking whether the value has a unitary representation via `has_unitary`. Returns: If `val` has a `_has_mixture_` method and its result is not NotImplemented, that result is returned. Otherwise, if `val` has a `_has_unitary_` method and its result is not NotImplemented and is True, that result is returned. Otherwise, True is returned if the value has a `_mixture_` method that returns a non-default value; False is returned if none of these checks succeeds.
19,826
def guess_xml_encoding(self, content): r matchobj = self.__regex[].match(content) return matchobj and matchobj.group(1).lower()
r"""Guess encoding from xml header declaration. :param content: xml content :rtype: str or None
19,827
def clean(self): if self.clean_level == : idx, = np.where(self.data.altitude <= 550) self.data = self[idx,:] self.data.replace(-999999., np.nan, inplace=True) if (self.clean_level == ) | (self.clean_level == ): try: idx, = np.where(np.abs(self.data.ionVelmeridional) < 10000.) self.data = self[idx,:] except AttributeError: pass if self.clean_level == : idx, = np.where(self.data.RPAflag <= 1) self.data = self[idx,:] self.data = self.data[ (self.data.driftMeterflag<= 3) ] else: idx, = np.where(self.data.RPAflag <= 0) self.data = self[idx,:] self.data = self.data[ (self.data.driftMeterflag<= 0) ] if self.clean_level == : idx, = np.where(self.data.RPAflag <= 4) self.data = self[idx,:] self.data = self.data[ (self.data.driftMeterflag<= 6) ] idx, = np.where(self.data.time <= 86400.) self.data = self[idx,:] idx, = np.where((self.data.mlt >= 0) & (self.data.mlt <= 24.)) self.data = self[idx,:] return
Routine to return C/NOFS IVM data cleaned to the specified level Parameters ----------- inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- Supports 'clean', 'dusty', 'dirty'
19,828
def _filehandle(self): if not self._fh or self._is_closed(): filename = self._rotated_logfile or self.filename if filename.endswith(): self._fh = gzip.open(filename, ) else: self._fh = open(filename, "r", 1) if self.read_from_end and not exists(self._offset_file): self._fh.seek(0, os.SEEK_END) else: self._fh.seek(self._offset) return self._fh
Return a filehandle to the file being tailed, with the position set to the current offset.
19,829
def isCompatible(self, other, cls): if not isinstance(other, cls): raise TypeError( % (cls.__name__, other.__class__.__name__)) reporter = self.compatibilityReporterClass(self, other) self._isCompatible(other, reporter) return not reporter.fatal, reporter
Evaluate interpolation compatibility with other.
19,830
def saddr(address): if isinstance(address, six.string_types): return address elif isinstance(address, tuple) and len(address) >= 2 and in address[0]: return .format(address[0], address[1]) elif isinstance(address, tuple) and len(address) >= 2: return .format(*address) else: raise TypeError(.format(type(address)))
Return a string representation for an address. The *address* paramater can be a pipe name, an IP address tuple, or a socket address. The return value is always a ``str`` instance.
19,831
def _split_header(header): params = {} parts = header.split() for param in parts: if param.find() > -1: continue param = param.strip() param_parts = param.split(, 1) params[param_parts[0]] = unquote(param_parts[1].strip()) return params
Turn Authorization: header into parameters.
19,832
def solid_angle(center, coords): r = [np.subtract(c, center) for c in coords] r_norm = [np.linalg.norm(i) for i in r] angle = 0 for i in range(1, len(r) - 1): j = i + 1 tp = np.abs(np.dot(r[0], np.cross(r[i], r[j]))) de = r_norm[0] * r_norm[i] * r_norm[j] + \ r_norm[j] * np.dot(r[0], r[i]) + \ r_norm[i] * np.dot(r[0], r[j]) + \ r_norm[0] * np.dot(r[i], r[j]) if de == 0: my_angle = 0.5 * pi if tp > 0 else -0.5 * pi else: my_angle = np.arctan(tp / de) angle += (my_angle if my_angle > 0 else my_angle + np.pi) * 2 return angle
Helper method to calculate the solid angle of a set of coords from the center. Args: center (3x1 array): Center to measure solid angle from. coords (Nx3 array): List of coords to determine solid angle. Returns: The solid angle.
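A quick numerical check of the helper above: the six faces of a cube, seen from its center, each subtend one sixth of the full sphere, i.e. about 2*pi/3 ≈ 2.09 steradians. The corner coordinates are illustrative:

    import numpy as np

    center = [0.0, 0.0, 0.0]
    # corners of the z = +0.5 face of a unit cube, ordered around the face
    face = [[-0.5, -0.5, 0.5],
            [ 0.5, -0.5, 0.5],
            [ 0.5,  0.5, 0.5],
            [-0.5,  0.5, 0.5]]

    omega = solid_angle(center, face)
    print(omega, 2 * np.pi / 3)  # the two values should agree closely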
19,833
def create(parallel): queue = {k: v for k, v in parallel.items() if k in ["queue", "cores_per_job", "mem"]} yield queue
Create a queue based on the provided parallel arguments. TODO Startup/tear-down. Currently using default queue for testing
19,834
def _set_show_firmware_option(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_firmware_option.show_firmware_option, is_container=, presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__show_firmware_option = t if hasattr(self, ): self._set()
Setter method for show_firmware_option, mapped from YANG variable /show/show_firmware_dummy/show_firmware_option (container) If this variable is read-only (config: false) in the source YANG file, then _set_show_firmware_option is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_firmware_option() directly.
19,835
def check_docstring_sections(self, definition, docstring): if not docstring: return lines = docstring.split("\n") if len(lines) < 2: return lower_section_names = [s.lower() for s in self.SECTION_NAMES] def _suspected_as_section(_line): result = self._get_leading_words(_line.lower()) return result in lower_section_names suspected_section_indices = [i for i, line in enumerate(lines) if _suspected_as_section(line)] SectionContext = namedtuple(, (, , , , , )) contexts = (SectionContext(self._get_leading_words(lines[i].strip()), lines[i - 1], lines[i], lines[i + 1:], i, False) for i in suspected_section_indices) contexts = (c for c in contexts if self._is_a_docstring_section(c)) for a, b in pairwise(contexts, None): end = -1 if b is None else b.original_index new_ctx = SectionContext(a.section_name, a.previous_line, a.line, lines[a.original_index + 1:end], a.original_index, b is None) for err in self._check_section(docstring, definition, new_ctx): yield err
D21{4,5}, D4{05,06,07,08,09,10}: Docstring sections checks. Check the general format of a sectioned docstring: '''This is my one-liner. Short Summary ------------- This is my summary. Returns ------- None. ''' Section names appear in `SECTION_NAMES`.
19,836
def import_key_pair(name, key, profile, key_type=None, **libcloud_kwargs): s import_key_pair_from_xxx method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.import_key_pair pair1 key_value_data123 profile1 salt myminion libcloud_compute.import_key_pair pair1 /path/to/key profile1 FILE': return _simple_key_pair(conn.import_key_pair_from_file(name, key, **libcloud_kwargs)) else: return _simple_key_pair(conn.import_key_pair_from_string(name, key, **libcloud_kwargs))
Import a new public key from string or a file path :param name: Key pair name. :type name: ``str`` :param key: Public key material, the string or a path to a file :type key: ``str`` or path ``str`` :param profile: The profile key :type profile: ``str`` :param key_type: The key pair type, either `FILE` or `STRING`. Will detect if not provided and assume that if the string is a path to an existing path it is a FILE, else STRING. :type key_type: ``str`` :param libcloud_kwargs: Extra arguments for the driver's import_key_pair_from_xxx method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.import_key_pair pair1 key_value_data123 profile1 salt myminion libcloud_compute.import_key_pair pair1 /path/to/key profile1
19,837
def _skip_trampoline(handler): data_event, self = (yield None) delegate = handler event = None depth = 0 while True: def pass_through(): _trans = delegate.send(Transition(data_event, delegate)) return _trans, _trans.delegate, _trans.event if data_event is not None and data_event.type is ReadEventType.SKIP: while True: trans, delegate, event = pass_through() if event is not None: if event.event_type is IonEventType.CONTAINER_END and event.depth <= depth: break if event is None or event.event_type is IonEventType.INCOMPLETE: data_event, _ = yield Transition(event, self) else: trans, delegate, event = pass_through() if event is not None and (event.event_type is IonEventType.CONTAINER_START or event.event_type is IonEventType.CONTAINER_END): depth = event.depth data_event, _ = yield Transition(event, self)
Intercepts events from container handlers, emitting them only if they should not be skipped.
19,838
def add_node(node, **kwds): nodes._add_node_class_names([node.__name__]) for key, val in kwds.iteritems(): try: visit, depart = val except ValueError: raise ValueError( % key) if key == : from docutils.writers.html4css1 import HTMLTranslator as translator elif key == : from docutils.writers.latex2e import LaTeXTranslator as translator else: continue setattr(translator, +node.__name__, visit) if depart: setattr(translator, +node.__name__, depart)
add_node from Sphinx
19,839
def is_quote_artifact(orig_text, span): res = False cursor = re.finditer(r)[^ .,:;?!()*+-].*?("|\, orig_text) for item in cursor: if item.span()[1] == span[1]: res = True return res
Distinguish between quotes and units.
19,840
def process_raw_data(cls, raw_data):
    properties = raw_data.get("properties", {})
    raw_content = properties.get("ipConfiguration", None)
    if raw_content is not None:
        resource = Resource.from_raw_data(raw_content)
        properties["ipConfiguration"] = resource
    return super(PublicIPAddresses, cls).process_raw_data(raw_data)
Create a new model using raw API response.
19,841
def maybe_timeout_options(self):
    if self._exit_timeout_start_time:
        return NailgunProtocol.TimeoutOptions(self._exit_timeout_start_time, self._exit_timeout)
    else:
        return None
Implements the NailgunProtocol.TimeoutProvider interface.
19,842
def timestamp_datetime(self, timestamp):
    # format string assumed; a typical readable layout
    format = '%Y-%m-%d %H:%M:%S'
    timestamp = time.localtime(timestamp)
    return time.strftime(format, timestamp)
Convert a Unix timestamp into a human-readable time string.
19,843
def get_memory_annotations(cls, exclude=None): result = set() annotations_in_memory = Annotation.__ANNOTATIONS_IN_MEMORY__ exclude = () if exclude is None else exclude for annotation_cls in annotations_in_memory: if issubclass(annotation_cls, exclude): continue if issubclass(annotation_cls, cls): result |= annotations_in_memory[annotation_cls] return result
Get annotations in memory which inherits from cls. :param tuple/type exclude: annotation type(s) to exclude from search. :return: found annotations which inherits from cls. :rtype: set
19,844
def load_data_split(proc_data_dir): ds_train = Dataset.load(path.join(proc_data_dir, )) ds_val = Dataset.load(path.join(proc_data_dir, )) ds_test = Dataset.load(path.join(proc_data_dir, )) return ds_train, ds_val, ds_test
Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data)
19,845
def _build(self, inputs, is_training): input_shape = tf.shape(inputs) with tf.control_dependencies([ tf.Assert(tf.equal(input_shape[-1], self._embedding_dim), [input_shape])]): flat_inputs = tf.reshape(inputs, [-1, self._embedding_dim]) distances = (tf.reduce_sum(flat_inputs**2, 1, keepdims=True) - 2 * tf.matmul(flat_inputs, self._w) + tf.reduce_sum(self._w ** 2, 0, keepdims=True)) encoding_indices = tf.argmax(- distances, 1) encodings = tf.one_hot(encoding_indices, self._num_embeddings) encoding_indices = tf.reshape(encoding_indices, tf.shape(inputs)[:-1]) quantized = self.quantize(encoding_indices) e_latent_loss = tf.reduce_mean((tf.stop_gradient(quantized) - inputs) ** 2) q_latent_loss = tf.reduce_mean((quantized - tf.stop_gradient(inputs)) ** 2) loss = q_latent_loss + self._commitment_cost * e_latent_loss quantized = inputs + tf.stop_gradient(quantized - inputs) avg_probs = tf.reduce_mean(encodings, 0) perplexity = tf.exp(- tf.reduce_sum(avg_probs * tf.log(avg_probs + 1e-10))) return {: quantized, : loss, : perplexity, : encodings, : encoding_indices,}
Connects the module to some inputs. Args: inputs: Tensor, final dimension must be equal to embedding_dim. All other leading dimensions will be flattened and treated as a large batch. is_training: boolean, whether this connection is to training data. Returns: dict containing the following keys and values: quantize: Tensor containing the quantized version of the input. loss: Tensor containing the loss to optimize. perplexity: Tensor containing the perplexity of the encodings. encodings: Tensor containing the discrete encodings, ie which element of the quantized space each input element was mapped to. encoding_indices: Tensor containing the discrete encoding indices, ie which element of the quantized space each input element was mapped to.
19,846
def compute(self): for varname in self.tendencies: self.tendencies[varname] *= 0. if not self.has_process_type_list: self._build_process_type_list() tendencies = {} ignored = self._compute_type() tendencies[] = self._compute_type() for name, var in self.state.items(): var += tendencies[][name] * self.timestep tendencies[] = self._compute_type() for name, var in self.state.items(): var += tendencies[][name] * self.timestep tendencies[] = self._compute_type() for name, var in self.state.items(): var -= ( (tendencies[][name] + tendencies[][name]) * self.timestep) for proctype in [, , ]: for varname, tend in tendencies[proctype].items(): self.tendencies[varname] += tend self_tend = self._compute() if self.time_type is : for varname, adj in self_tend.items(): self_tend[varname] /= self.timestep for varname, tend in self_tend.items(): self.tendencies[varname] += tend return self.tendencies
Computes the tendencies for all state variables given current state and specified input. The function first computes all diagnostic processes. They don't produce any tendencies directly but they may affect the other processes (such as change in solar distribution). Subsequently, all tendencies and diagnostics for all explicit processes are computed. Tendencies due to implicit and adjustment processes need to be calculated from a state that is already adjusted after explicit alteration. For that reason the explicit tendencies are applied to the states temporarily. Now all tendencies from implicit processes are calculated by matrix inversions and similar to the explicit tendencies, the implicit ones are applied to the states temporarily. Subsequently, all instantaneous adjustments are computed. Then the changes that were made to the states from explicit and implicit processes are removed again as this :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()` function is supposed to calculate only tendencies and not apply them to the states. Finally, all calculated tendencies from all processes are collected for each state, summed up and stored in the dictionary ``self.tendencies``, which is an attribute of the time-dependent-process object, for which the :class:`~climlab.process.time_dependent_process.TimeDependentProcess.compute()` method has been called. **Object attributes** \n During method execution following object attributes are modified: :ivar dict tendencies: dictionary that holds tendencies for all states is calculated for current timestep through adding up tendencies from explicit, implicit and adjustment processes. :ivar dict diagnostics: process diagnostic dictionary is updated by diagnostic dictionaries of subprocesses after computation of tendencies.
19,847
def multiline_merge(lines, current_event, re_after, re_before): events = [] for line in lines: if re_before and re_before.match(line): current_event.append(line) elif re_after and current_event and re_after.match(current_event[-1]): current_event.append(line) else: if current_event: events.append(.join(current_event)) current_event.clear() current_event.append(line) return events
Merge multi-line events. Some events (like a Python traceback or a Java stack trace) span multiple lines. This method merges them using two regular expressions: re_after and re_before. If a line matches re_after, it is merged with the next line. If a line matches re_before, it is merged with the previous line. This function returns a list of complete events. Note that because we don't know whether an event is complete before another event starts, the last event is not returned but stored in current_event. You should pass the same current_event to successive calls to multiline_merge. current_event is a list of lines that belong to the same event.
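A usage sketch merging a Java-style stack trace: indented continuation lines match re_before and are therefore glued to the previous line. The sample log lines are illustrative:

    import re

    lines = [
        "ERROR something went wrong",
        "    at com.example.Main.run(Main.java:12)",
        "    at com.example.Main.main(Main.java:4)",
        "INFO next record",
    ]
    current_event = []
    events = multiline_merge(lines, current_event,
                             re_after=None,
                             re_before=re.compile(r"^\s+"))
    # events now holds one merged entry for the ERROR + "at ..." lines;
    # "INFO next record" stays in current_event until later lines complete it.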
19,848
def from_dict(input_dict, data=None): import GPy m = GPy.core.model.Model.from_dict(input_dict, data) from copy import deepcopy sparse_gp = deepcopy(m) return SparseGPClassification(sparse_gp.X, sparse_gp.Y, sparse_gp.Z, sparse_gp.kern, sparse_gp.likelihood, sparse_gp.inference_method, sparse_gp.mean_function, name=)
Instantiate an SparseGPClassification object using the information in input_dict (built by the to_dict method). :param data: It is used to provide X and Y for the case when the model was saved using save_data=False in to_dict method. :type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
19,849
def apply_new_global_variable_name(self, path, new_gv_name): gv_name = self.list_store[path][self.NAME_STORAGE_ID] if gv_name == new_gv_name or not self.global_variable_is_editable(gv_name, ): return data_value = self.model.global_variable_manager.get_representation(gv_name) data_type = self.model.global_variable_manager.get_data_type(gv_name) try: self.model.global_variable_manager.delete_variable(gv_name) self.model.global_variable_manager.set_variable(new_gv_name, data_value, data_type=data_type) gv_name = new_gv_name except (AttributeError, RuntimeError, TypeError) as e: logger.warning("Can not apply new name ".format(e)) self.update_global_variables_list_store() self.select_entry(gv_name) if hasattr(self.tree_view_keypress_callback.__func__, "core_element_id"): self.tree_view_keypress_callback.__func__.core_element_id = gv_name
Change global variable name/key according handed string Updates the global variable name only if different and already in list store. :param path: The path identifying the edited global variable tree view row, can be str, int or tuple. :param str new_gv_name: New global variable name
19,850
def get_template(self, context, **kwargs): if in kwargs[]: self.template = kwargs[][] return super(GoscaleTemplateInclusionTag, self).get_template(context, **kwargs)
Returns the template to be used for the current context and arguments.
19,851
def capture_message(sock, get_channel=False): try: if get_channel: if HAS_NATIVE_SUPPORT: cf, addr = sock.recvfrom(CANFD_MTU) channel = addr[0] if isinstance(addr, tuple) else addr else: data = ctypes.create_string_buffer(CANFD_MTU) addr = ctypes.create_string_buffer(32) addrlen = ctypes.c_int(len(addr)) received = libc.recvfrom(sock.fileno(), data, len(data), 0, addr, ctypes.byref(addrlen)) cf = data.raw[:received] family, ifindex = struct.unpack_from("Hi", addr.raw) assert family == AF_CAN data = struct.pack("16xi", ifindex) res = fcntl.ioctl(sock, SIOCGIFNAME, data) channel = ctypes.create_string_buffer(res).value.decode() else: cf = sock.recv(CANFD_MTU) channel = None except socket.error as exc: raise can.CanError("Error receiving: %s" % exc) can_id, can_dlc, flags, data = dissect_can_frame(cf) binary_structure = "@LL" res = fcntl.ioctl(sock, SIOCGSTAMP, struct.pack(binary_structure, 0, 0)) seconds, microseconds = struct.unpack(binary_structure, res) timestamp = seconds + microseconds * 1e-6 is_extended_frame_format = bool(can_id & CAN_EFF_FLAG) is_remote_transmission_request = bool(can_id & CAN_RTR_FLAG) is_error_frame = bool(can_id & CAN_ERR_FLAG) is_fd = len(cf) == CANFD_MTU bitrate_switch = bool(flags & CANFD_BRS) error_state_indicator = bool(flags & CANFD_ESI) if is_extended_frame_format: arbitration_id = can_id & 0x1FFFFFFF else: arbitration_id = can_id & 0x000007FF msg = Message(timestamp=timestamp, channel=channel, arbitration_id=arbitration_id, is_extended_id=is_extended_frame_format, is_remote_frame=is_remote_transmission_request, is_error_frame=is_error_frame, is_fd=is_fd, bitrate_switch=bitrate_switch, error_state_indicator=error_state_indicator, dlc=can_dlc, data=data) return msg
Captures a message from given socket. :param socket.socket sock: The socket to read a message from. :param bool get_channel: Find out which channel the message comes from. :return: The received message, or None on failure.
19,852
def on_connected(self, headers, body): if in headers: self.heartbeats = utils.calculate_heartbeats( headers[].replace(, ).split(), self.heartbeats) if self.heartbeats != (0, 0): self.send_sleep = self.heartbeats[0] / 1000 self.receive_sleep = (self.heartbeats[1] / 1000) * self.heart_beat_receive_scale log.debug("Setting receive_sleep to %s", self.receive_sleep) self.received_heartbeat = monotonic() + self.receive_sleep self.running = True if self.heartbeat_thread is None: self.heartbeat_thread = utils.default_create_thread( self.__heartbeat_loop) self.heartbeat_thread.name = "StompHeartbeat%s" % \ getattr(self.heartbeat_thread, "name", "Thread")
Once the connection is established, and 'heart-beat' is found in the headers, we calculate the real heartbeat numbers (based on what the server sent and what was specified by the client) - if the heartbeats are not 0, we start up the heartbeat loop accordingly. :param dict headers: headers in the connection message :param body: the message body
19,853
def merge_segments(filename, scan, cleanup=True, sizelimit=0): workdir = os.path.dirname(filename) fileroot = os.path.basename(filename) candslist = glob.glob(os.path.join(workdir, + fileroot + + str(scan) + )) noiselist = glob.glob(os.path.join(workdir, + fileroot + + str(scan) + )) candssegs = sorted([candsfile.rstrip().split()[1] for candsfile in candslist]) noisesegs = sorted([noisefile.rstrip().split()[1] for noisefile in noiselist]) if not candslist and not noiselist: logger.warn() return if not os.path.exists(os.path.join(workdir, + fileroot + + str(scan) + )): logger.info( % (str(candssegs), fileroot, scan)) logger.debug( % candslist) cands = {} for candsfile in candslist: with open(candsfile, ) as pkl: state = pickle.load(pkl) result = pickle.load(pkl) for kk in result.keys(): cands[kk] = result[kk] segment = state.pop() if sizelimit and len(cands): logger.debug() if in state[]: snrcol = state[].index() elif in state[]: snrcol = state[].index() candsize = sys.getsizeof(cands[cands.keys()[0]])/1e6 maxlen = int(sizelimit/candsize) if len(cands) > maxlen: logger.info( % (len(cands), sizelimit, maxlen)) snrs = [abs(cands[k][snrcol]) for k in cands.iterkeys()] snrsort = sorted(snrs, reverse=True) snrmax = snrsort[maxlen] cands = {k: v for k,v in cands.items() if abs(v[snrcol]) > snrmax} with open(os.path.join(workdir, + fileroot + + str(scan) + ), ) as pkl: pickle.dump(state, pkl, protocol=2) pickle.dump( (np.array(cands.keys()), np.array(cands.values())), pkl, protocol=2) if cleanup: if os.path.exists(os.path.join(workdir, + fileroot + + str(scan) + )): for candsfile in candslist: os.remove(candsfile) else: logger.warn( % scan) if not os.path.exists(os.path.join(workdir, + fileroot + + str(scan) + )): logger.info( % (str(noisesegs), fileroot, scan)) logger.debug( % noiselist) noise = [] for noisefile in noiselist: with open(noisefile, ) as pkl: result = pickle.load(pkl) noise += result if len(noise): with open(os.path.join(workdir, + fileroot + + str(scan) + ), ) as pkl: pickle.dump(noise, pkl, protocol=2) if cleanup: if os.path.exists(os.path.join(workdir, + fileroot + + str(scan) + )): for noisefile in noiselist: os.remove(noisefile) else: logger.warn( % scan)
Merges cands/noise pkl files from multiple segments to single cands/noise file. Expects segment cands pkls with have (1) state dict and (2) cands dict. Writes tuple state dict and duple of numpy arrays A single pkl written per scan using root name fileroot. if cleanup, it will remove segments after merging. if sizelimit, it will reduce the output file to be less than this many MB.
19,854
def one_of(s): @Parser def one_of_parser(text, index=0): if index < len(text) and text[index] in s: return Value.success(index + 1, text[index]) else: return Value.failure(index, .format(s)) return one_of_parser
Parser a char from specified string.
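The decorated inner function follows the (text, index) -> Value convention of parser-combinator libraries. A self-contained sketch of that behaviour with a stand-in Value type:

    from collections import namedtuple

    Value = namedtuple("Value", "status index result")

    def success(index, result):
        return Value(True, index, result)

    def failure(index, expected):
        return Value(False, index, expected)

    def one_of_chars(s):
        # mirrors the body of one_of(): consume one char if it is in s
        def parse(text, index=0):
            if index < len(text) and text[index] in s:
                return success(index + 1, text[index])
            return failure(index, "one of {!r}".format(s))
        return parse

    digit = one_of_chars("0123456789")
    print(digit("42"))  # Value(status=True, index=1, result='4')
    print(digit("x1"))  # Value(status=False, index=0, result="one of '0123456789'")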
19,855
def get_access_token(client_id, client_secret): headers = {: } payload = { : client_id, : client_secret } request = requests.post(token_url, data=payload, headers=headers) if request.status_code == 200: token = request.json() return token return {: request.status_code, "message": request.text}
Name: token Parameters: client_id, client_secret Return: dictionary
19,856
def type(self):
    properties = {self.is_code: "code",
                  self.is_data: "data",
                  self.is_string: "string",
                  self.is_tail: "tail",
                  self.is_unknown: "unknown"}
    for k, v in properties.items():
        if k:
            return v
return the type of the Line
19,857
def get_standard_form(self, data):
    if self.synonym_map is None:
        return data
    from indic_transliteration import sanscript
    return sanscript.transliterate(
        data=sanscript.transliterate(_from=self.name, _to=sanscript.DEVANAGARI, data=data),
        _from=sanscript.DEVANAGARI, _to=self.name)
Roman schemes define multiple representations of the same devanAgarI character. This method gets a library-standard representation. data : a text in the given scheme.
19,858
def currencyFormat(_context, code, symbol, format, currency_digits=True, decimal_quantization=True, name=): _context.action( discriminator=(, name, code), callable=_register_currency, args=(name, code, symbol, format, currency_digits, decimal_quantization) )
Handle currencyFormat subdirectives.
19,859
def is_locked(self):
    if self.provider.lock_manager is None:
        return False
    return self.provider.lock_manager.is_url_locked(self.get_ref_url())
Return True, if URI is locked.
19,860
def send_static_file(self, filename): if self.config[] == : abort(404) theme_static_folder = getattr(self, , None) if theme_static_folder: try: return send_from_directory(theme_static_folder, filename) except NotFound: pass return super(CustomFlask, self).send_static_file(filename)
Send static files from the static folder in the current selected theme prior to the global static folder. :param filename: static filename :return: response object
19,861
def collect_hunt_results(self, hunt): if not os.path.isdir(self.output_path): os.makedirs(self.output_path) output_file_path = os.path.join( self.output_path, .join((self.hunt_id, ))) if os.path.exists(output_file_path): print(.format(output_file_path)) return None self._check_approval_wrapper( hunt, self._get_and_write_archive, hunt, output_file_path) results = self._extract_hunt_results(output_file_path) print(.format( hunt.hunt_id, output_file_path)) return results
Download current set of files in results. Args: hunt: The GRR hunt object to download files from. Returns: list: tuples containing: str: human-readable description of the source of the collection. For example, the name of the source host. str: path to the collected data. Raises: ValueError: if approval is needed and approvers were not specified.
19,862
def _read_vector(ctx: ReaderContext) -> vector.Vector:
    start = ctx.reader.advance()
    assert start == "["
    return _read_coll(ctx, vector.vector, "]", "vector")
Read a vector element from the input stream.
19,863
def make_tmp_name(name):
    path, base = os.path.split(name)
    tmp_base = ".tmp-%s-%s" % (base, uuid4().hex)
    tmp_name = os.path.join(path, tmp_base)
    try:
        yield tmp_name
    finally:
        safe_remove(tmp_name)
Generates a tmp name for a file or dir. This is a tempname that sits in the same dir as `name`. If it exists on disk at context exit time, it is deleted.
19,864
def spendables_for_address(address, netcode, format=None):
    if format:
        method = "as_%s" % format
    for m in service_provider_methods("spendables_for_address",
                                      get_default_providers_for_netcode(netcode)):
        try:
            spendables = m(address)
            if format:
                spendables = [getattr(s, method)() for s in spendables]
            return spendables
        except Exception:
            pass
    return []
Return a list of Spendable objects for the given bitcoin address. Set format to "text" or "dict" to transform return value from an object to a string or dict. This is intended to be a convenience function. There is no way to know that the list returned is a complete list of spendables for the address in question. You can verify that they really do come from the existing transaction by calling tx_utils.validate_unspents.
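A usage sketch; the address is the well-known Bitcoin genesis-block address and is only illustrative, and the results depend on which network providers are configured:

    # query configured providers for unspent outputs of a BTC address
    spendables = spendables_for_address(
        "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa", "BTC", format="dict")

    for s in spendables:
        print(s)  # each entry is the provider's Spendable converted via as_dict()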
19,865
def _write_cdx_field(self, record, raw_file_record_size, raw_file_offset): if record.fields[WARCRecord.WARC_TYPE] != WARCRecord.RESPONSE \ or not re.match(r, record.fields[WARCRecord.CONTENT_TYPE]): return url = record.fields[] _logger.debug(, url) http_header = record.get_http_header() if http_header: mime_type = self.parse_mimetype( http_header.fields.get(, ) ) or response_code = str(http_header.status_code) else: mime_type = response_code = timestamp = str(int( wpull.util.parse_iso8601_str(record.fields[WARCRecord.WARC_DATE]) )) checksum = record.fields.get(, ) if checksum.startswith(): checksum = checksum.replace(, , 1) else: checksum = raw_file_record_size_str = str(raw_file_record_size) raw_file_offset_str = str(raw_file_offset) filename = os.path.basename(self._warc_filename) record_id = record.fields[WARCRecord.WARC_RECORD_ID] fields_strs = ( url, timestamp, mime_type, response_code, checksum, raw_file_record_size_str, raw_file_offset_str, filename, record_id ) with open(self._cdx_filename, mode=, encoding=) as out_file: out_file.write(self.CDX_DELIMINATOR.join(fields_strs)) out_file.write()
Write the CDX field if needed.
19,866
def ptmsiReallocationComplete():
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x11)
    packet = a / b
    return packet
P-TMSI REALLOCATION COMPLETE Section 9.4.8
19,867
def sprand(m, n, density, format='csr'):
    # default format literal assumed to be 'csr', matching the docstring's examples
    m, n = int(m), int(n)
    A = _rand_sparse(m, n, density, format='csr')
    A.data = sp.rand(A.nnz)
    return A.asformat(format)
Return a random sparse matrix. Parameters ---------- m, n : int shape of the result density : float target a matrix with nnz(A) = m*n*density, 0<=density<=1 format : string sparse matrix format to return, e.g. 'csr', 'coo', etc. Return ------ A : sparse matrix m x n sparse matrix Examples -------- >>> from pyamg.gallery import sprand >>> A = sprand(5,5,3/5.0)
19,868
def _verifyHostKey(self, hostKey, fingerprint):
    if fingerprint in self.knownHosts:
        return defer.succeed(True)
    return defer.fail(UnknownHostKey(hostKey, fingerprint))
Called when the ssh transport requests us to verify a given host key. Return a deferred that calls back if we accept the key or errbacks if we decide to reject it.
19,869
def add_extra_headers(self, sample_names):
    if not sample_names:
        return []
    full_headers = list(self.orient_data[sample_names[0]].keys())
    add_ons = []
    for head in full_headers:
        if head not in self.header_names:
            add_ons.append((head, head))
    return add_ons
If there are samples, add any additional keys they might use to supplement the default headers. Return the headers for adding, in the format: [(header_name, header_display_name), ...]
19,870
def get_elb_names(self, region, config): region_dict = config.get(, {}).get(region, {}) if not in region_dict: elb_conn = boto.ec2.elb.connect_to_region(region, **self.auth_kwargs) full_elb_names = \ [elb.name for elb in elb_conn.get_all_load_balancers()] matchers = \ [re.compile(regex) for regex in config.get(, [])] return elb_names
:param region: name of a region :param config: Collector config dict :return: list of elb names to query in the given region
19,871
def send_login_code(self, code, context, **kwargs): from_number = self.from_number or getattr(settings, ) sms_content = render_to_string(self.template_name, context) self.twilio_client.messages.create( to=code.user.phone_number, from_=from_number, body=sms_content )
Send a login code via SMS
19,872
def prune(self, root):
    max_entries_per_target = self._max_entries_per_target
    if os.path.isdir(root) and max_entries_per_target:
        safe_rm_oldest_items_in_dir(root, max_entries_per_target)
Prune stale cache files If the option --cache-target-max-entry is greater than zero, then prune will remove all but n old cache files for each target/task. :param str root: The path under which cacheable artifacts will be cleaned
19,873
def _decode_response(response): content_type = response.headers.get(, ) logger.debug("status[%s] content_type[%s] encoding[%s]" % (response.status_code, content_type, response.encoding)) response.raise_for_status() content = response.content.strip() if response.encoding: content = content.decode(response.encoding) if not content: logger.debug("no content in response") return content if content_type.split()[0] != : return content if content.startswith(GERRIT_MAGIC_JSON_PREFIX): content = content[len(GERRIT_MAGIC_JSON_PREFIX):] try: return json.loads(content) except ValueError: logger.error(, content) raise
Strip off Gerrit's magic prefix and decode a response. :returns: Decoded JSON content as a dict, or raw text if content could not be decoded as JSON. :raises: requests.HTTPError if the response contains an HTTP error status code.
19,874
def command_drop_tables(self, meta_name=None): answer = six.moves.input(u) if answer.strip().lower()!=: sys.exit() def _drop_metadata_tables(metadata): table = next(six.itervalues(metadata.tables), None) if table is None: print() else: engine = self.session.get_bind(clause=table) drop_everything(engine) print() if isinstance(self.metadata, MetaData): print(, end=) _drop_metadata_tables(self.metadata) else: for current_meta_name, metadata in self.metadata.items(): if meta_name not in (current_meta_name, None): continue print(.format(current_meta_name), end=) _drop_metadata_tables(metadata)
Drops all tables without dropping a database:: ./manage.py sqla:drop_tables [meta_name]
19,875
def rewrite_return_as_assignments(func_node, interface):
    func_node = _RewriteReturn(interface).visit(func_node)
    ast.fix_missing_locations(func_node)
    return func_node
Modify FunctionDef node to directly assign instead of return.
19,876
def _peer_get_bfd(self, tx, rx, multiplier): tx = self._callback(tx, handler=) rx = self._callback(rx, handler=) multiplier = self._callback(multiplier, handler=) tx = pynos.utilities.return_xml(str(tx)) rx = pynos.utilities.return_xml(str(rx)) multiplier = pynos.utilities.return_xml(str(multiplier)) config = pynos.utilities.merge_xml(tx, rx) return pynos.utilities.merge_xml(config, multiplier)
Get and merge the `bfd` config from global BGP. You should not use this method. You probably want `BGP.bfd`. Args: tx: XML document with the XML to get the transmit interval. rx: XML document with the XML to get the receive interval. multiplier: XML document with the XML to get the interval multiplier. Returns: Merged XML document. Raises: None
19,877
def enable_performance_data(self):
    if not self.my_conf.process_performance_data:
        self.my_conf.modified_attributes |= \
            DICT_MODATTR["MODATTR_PERFORMANCE_DATA_ENABLED"].value
        self.my_conf.process_performance_data = True
        self.my_conf.explode_global_conf()
        self.daemon.update_program_status()
Enable performance data processing (globally) Format of the line that triggers function call:: ENABLE_PERFORMANCE_DATA :return: None
19,878
def wait(self, seconds=None, **kw):
    self._heartbeat_thread.hurry()
    self._transport.set_timeout(seconds=1)
    warning_screen = self._yield_warning_screen(seconds)
    for elapsed_time in warning_screen:
        if self._should_stop_waiting(**kw):
            break
        try:
            try:
                self._process_packets()
            except TimeoutError:
                pass
        except KeyboardInterrupt:
            self._close()
            raise
        except ConnectionError as e:
            self._opened = False
            try:
                # The warning text and the callback name below were lost;
                # both are reconstructions.
                warning = Exception('[connection error] %s' % e)
                warning_screen.throw(warning)
            except StopIteration:
                self._warn(warning)
            try:
                namespace = self.get_namespace()
                namespace._find_packet_callback('disconnect')()
            except PacketError:
                pass
    self._heartbeat_thread.relax()
    self._transport.set_timeout()
Wait in a loop and react to events as defined in the namespaces
19,879
def _align_orthologous_gene_pairwise(self, g_id, gapopen=10, gapextend=0.5, engine=, parse=True, force_rerun=False): protein_seqs_aln_pickle_path = op.join(self.sequences_by_gene_dir, .format(g_id)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_aln_pickle_path): protein_seqs_pickle_path = self.gene_protein_pickles[g_id] protein_pickle = ssbio.io.load_pickle(protein_seqs_pickle_path) if not protein_pickle.representative_sequence: log.error(.format(g_id)) return if len(protein_pickle.sequences) < 1: log.error(.format(g_id)) return alignment_dir = op.join(self.sequences_by_gene_dir, g_id) ssbio.utils.make_dir(alignment_dir) protein_pickle.pairwise_align_sequences_to_representative(gapopen=gapopen, gapextend=gapextend, engine=engine, outdir=alignment_dir, parse=parse, force_rerun=force_rerun) protein_pickle.save_pickle(outfile=protein_seqs_aln_pickle_path) return g_id, protein_seqs_aln_pickle_path
Align orthologous strain sequences to representative Protein sequence, save as new pickle
19,880
def next_requests(self):
    # The stripped setting name is 'REDIS_START_URLS_AS_SET', matching the
    # defaults.START_URLS_AS_SET fallback used here.
    use_set = self.settings.getbool('REDIS_START_URLS_AS_SET',
                                    defaults.START_URLS_AS_SET)
    fetch_one = self.server.spop if use_set else self.server.lpop
    found = 0
    while found < self.redis_batch_size:
        data = fetch_one(self.redis_key)
        if not data:
            # Queue empty.
            break
        req = self.make_request_from_data(data)
        if req:
            yield req
            found += 1
        else:
            self.logger.debug("Request not made from data: %r", data)
    if found:
        self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
Returns a request to be scheduled or none.
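A hedged illustration of the two queue shapes this generator reads from, using redis-py directly; the key names are examples, not taken from the source:

import redis

r = redis.Redis()
# List-backed start URLs (read with LPOP when the set option is off; order preserved)
r.rpush('myspider:start_urls', 'https://example.com/a', 'https://example.com/b')
# Set-backed start URLs (read with SPOP when the set option is on; order not preserved)
r.sadd('myspider:start_urls_set', 'https://example.com/c')

print(r.lpop('myspider:start_urls'))      # b'https://example.com/a'
print(r.spop('myspider:start_urls_set'))  # b'https://example.com/c'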
19,881
def return_rri(self, begsam, endsam):
    interval = endsam - begsam
    dat = empty(interval)
    k = 0
    # The file mode and the field separator were lost; 'r' and a tab
    # separator are assumptions.
    with open(self.filename, 'r') as f:
        [next(f) for x in range(12)]  # skip the 12-line header
        for j, datum in enumerate(f):
            if begsam <= j < endsam:
                dat[k] = float64(datum[:datum.index('\t')])
                k += 1
                if k == interval:
                    break
    return dat
Return raw, irregularly-timed RRI.
19,882
def dumps(self, msg, use_bin_type=False):
    # NOTE: this entry is damaged. The split of ext_type_encoder's first
    # branch into an integer case and a datetime case, the strftime format,
    # and the 'def verylong_encoder' header are reconstructions inferred from
    # the surrounding calls; treat them as best-effort, not verbatim source.
    def ext_type_encoder(obj):
        if isinstance(obj, six.integer_types):
            # Integers too wide for msgpack are sent as text.
            return six.text_type(obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            # Encode datetimes as msgpack ExtType 78.
            return msgpack.ExtType(78, salt.utils.stringutils.to_bytes(
                obj.strftime('%Y%m%dT%H:%M:%S.%f')))
        elif isinstance(obj, immutabletypes.ImmutableDict):
            return dict(obj)
        elif isinstance(obj, immutabletypes.ImmutableList):
            return list(obj)
        elif isinstance(obj, (set, immutabletypes.ImmutableSet)):
            return obj

    try:
        if msgpack.version >= (0, 4, 0):
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            use_bin_type=use_bin_type,
                                            _msgpack_module=msgpack)
        else:
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            _msgpack_module=msgpack)
    except (OverflowError, msgpack.exceptions.PackValueError):
        # Payload contains integers msgpack cannot encode: walk it,
        # stringify the very long ones, then serialize again.
        def verylong_encoder(obj, context):
            objid = id(obj)
            # (a recursion guard that used 'context' appears to have been
            # lost here)
            context.add(objid)
            if isinstance(obj, dict):
                for key, value in six.iteritems(obj.copy()):
                    obj[key] = verylong_encoder(value, context)
                return dict(obj)
            elif isinstance(obj, (list, tuple)):
                obj = list(obj)
                for idx, entry in enumerate(obj):
                    obj[idx] = verylong_encoder(entry, context)
                return obj
            if isinstance(obj, six.integer_types) and obj >= pow(2, 64):
                return six.text_type(obj)
            else:
                return obj

        msg = verylong_encoder(msg, set())
        if msgpack.version >= (0, 4, 0):
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            use_bin_type=use_bin_type,
                                            _msgpack_module=msgpack)
        else:
            return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,
                                            _msgpack_module=msgpack)
Run the correct dumps serialization format :param use_bin_type: Useful for Python 3 support. Tells msgpack to differentiate between 'str' and 'bytes' types by encoding them differently. Since this changes the wire protocol, this option should not be used outside of IPC.
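To illustrate what use_bin_type changes on the wire, here is plain msgpack, independent of the salt wrappers shown above:

import msgpack

payload = {'text': 'hello', 'blob': b'\x00\x01'}

packed = msgpack.packb(payload, use_bin_type=True)   # str -> msgpack str, bytes -> msgpack bin
roundtrip = msgpack.unpackb(packed, raw=False)       # strings come back as Python str
print(roundtrip)                                     # {'text': 'hello', 'blob': b'\x00\x01'}

legacy = msgpack.packb(payload, use_bin_type=False)  # both types collapse into msgpack raw
print(msgpack.unpackb(legacy, raw=True))             # {b'text': b'hello', b'blob': b'\x00\x01'}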
19,883
def convert_meas_df_thellier_gui(meas_df_in, output):
    # The column names in the fallback below were lost; 'measurement' and
    # 'measurement_number' are inferred from the docstring and may not match
    # the original literals exactly.
    output = int(output)
    meas_mapping = get_thellier_gui_meas_mapping(meas_df_in, output)
    meas_df_out = meas_df_in.rename(columns=meas_mapping)
    if 'measurement' not in meas_df_out.columns:
        meas_df_out['measurement'] = meas_df_in['measurement_number']
    return meas_df_out
Take a measurement dataframe and convert column names from MagIC 2 --> 3 or vice versa. Use treat_step_num --> measurement_number if available, otherwise measurement --> measurement_number. Parameters ---------- meas_df_in : pandas DataFrame input dataframe with measurement data output : int output to MagIC 2 or MagIC 3
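A hedged, standalone illustration of the rename step; the mapping shown is an example subset, not the full MagIC 2/3 mapping:

import pandas as pd

meas2 = pd.DataFrame({'measurement_number': [1, 2], 'treatment_temp': [273, 373]})
mapping = {'measurement_number': 'measurement', 'treatment_temp': 'treat_temp'}  # example only
meas3 = meas2.rename(columns=mapping)
print(list(meas3.columns))   # ['measurement', 'treat_temp']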
19,884
def _ScheduleTasks(self, storage_writer): logger.debug() self._status = definitions.STATUS_INDICATOR_RUNNING event_source_heap = _EventSourceHeap() self._FillEventSourceHeap( storage_writer, event_source_heap, start_with_first=True) event_source = event_source_heap.PopEventSource() task = None while event_source or self._task_manager.HasPendingTasks(): if self._abort: break try: if not task: task = self._task_manager.CreateRetryTask() if not task and event_source: task = self._task_manager.CreateTask(self._session_identifier) task.file_entry_type = event_source.file_entry_type task.path_spec = event_source.path_spec event_source = None self._number_of_consumed_sources += 1 if self._guppy_memory_profiler: self._guppy_memory_profiler.Sample() if task: if self._ScheduleTask(task): logger.debug( .format( task.identifier, task.path_spec.comparable)) self._task_manager.SampleTaskStatus(task, ) task = None else: self._task_manager.SampleTaskStatus(task, ) self._MergeTaskStorage(storage_writer) if not event_source_heap.IsFull(): self._FillEventSourceHeap(storage_writer, event_source_heap) if not task and not event_source: event_source = event_source_heap.PopEventSource() except KeyboardInterrupt: self._abort = True self._processing_status.aborted = True if self._status_update_callback: self._status_update_callback(self._processing_status) for task in self._task_manager.GetFailedTasks(): warning = warnings.ExtractionWarning( message=, path_spec=task.path_spec) self._storage_writer.AddWarning(warning) self._processing_status.error_path_specs.append(task.path_spec) self._status = definitions.STATUS_INDICATOR_IDLE if self._abort: logger.debug() else: logger.debug()
Schedules tasks. Args: storage_writer (StorageWriter): storage writer for a session storage.
19,885
def set_order(self, order):
    # The dictionary keys were lost; 'reverse' (bool) and 'key' (str) are
    # taken from the docstring and the ListStore column types.
    m = gtk.ListStore(bool, str)
    for item in order:
        m.append((item['reverse'], item['key']))
    self.set_model(m)
Takes a list of dictionaries. Those correspond to the arguments of `list.sort` and must contain the keys 'key' and 'reverse' (a boolean). You must call `set_labels` before this!
19,886
def index(self, x, x_link=None):
    if x is None:
        raise ValueError("provide at least one dataframe")
    elif x_link is not None:
        x = (x, x_link)
    elif isinstance(x, (list, tuple)):
        x = tuple(x)
    else:
        x = (x,)

    if self.verify_integrity:
        for df in x:
            self._verify_integrety(df)

    if not self._deduplication(x):
        pairs = self._link_index(*x)
        names = self._make_index_names(x[0].index.name, x[1].index.name)
    else:
        pairs = self._dedup_index(*x)
        names = self._make_index_names(x[0].index.name, x[0].index.name)

    pairs.rename(names, inplace=True)
    return pairs
Make an index of record pairs. Use a custom function to make record pairs of one or two dataframes. Each function should return a pandas.MultiIndex with record pairs. Parameters ---------- x: pandas.DataFrame A pandas DataFrame. When `x_link` is None, the algorithm makes record pairs within the DataFrame. When `x_link` is not empty, the algorithm makes pairs between `x` and `x_link`. x_link: pandas.DataFrame, optional A second DataFrame to link with the DataFrame x. Returns ------- pandas.MultiIndex A pandas.MultiIndex with record pairs. Each record pair contains the index labels of two records.
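As a hedged sketch of the kind of custom function this indexer wraps: a full (cross) index between two small frames, returned as a pandas.MultiIndex:

import pandas as pd

df_a = pd.DataFrame({'name': ['ann', 'bob']}, index=[10, 11])
df_b = pd.DataFrame({'name': ['bob', 'eve']}, index=[20, 21])

def full_index(df_left, df_right):
    # Every record in df_left paired with every record in df_right.
    return pd.MultiIndex.from_product([df_left.index, df_right.index])

pairs = full_index(df_a, df_b)
print(pairs.tolist())   # [(10, 20), (10, 21), (11, 20), (11, 21)]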
19,887
def eval_objfn(self):
    dfd = self.obfn_dfd()
    reg = self.obfn_reg()
    obj = dfd + reg[0]
    return (obj, dfd) + reg[1:]
Compute components of objective function as well as total contribution to objective function.
19,888
def compare_baselines(old_baseline_filename, new_baseline_filename): if old_baseline_filename == new_baseline_filename: raise RedundantComparisonError old_baseline = _get_baseline_from_file(old_baseline_filename) new_baseline = _get_baseline_from_file(new_baseline_filename) _remove_nonexistent_files_from_baseline(old_baseline) _remove_nonexistent_files_from_baseline(new_baseline) secrets_to_compare = _get_secrets_to_compare(old_baseline, new_baseline) total_reviews = len(secrets_to_compare) current_index = 0 secret_iterator = BidirectionalIterator(secrets_to_compare) for filename, secret, is_removed in secret_iterator: _clear_screen() current_index += 1 header = if is_removed: plugins_used = old_baseline[] header = header.format( colorize(, AnsiColor.BOLD), .format( colorize(, AnsiColor.RED), ), ) else: plugins_used = new_baseline[] header = header.format( colorize(, AnsiColor.BOLD), .format( colorize(, AnsiColor.LIGHT_GREEN), ), ) try: _print_context( filename, secret, current_index, total_reviews, plugins_used, additional_header_lines=header, force=is_removed, ) decision = _get_user_decision( can_step_back=secret_iterator.can_step_back(), prompt_secret_decision=False, ) except SecretNotFoundOnSpecifiedLineError: decision = _get_user_decision(prompt_secret_decision=False) if decision == : print() break if decision == : current_index -= 2 secret_iterator.step_back_on_next_iteration()
This function enables developers to more easily configure plugin settings, by comparing two generated baselines and highlighting their differences. For effective use, a few assumptions are made: 1. Baselines are sorted by (filename, line_number, hash). This allows for a deterministic order, when doing a side-by-side comparison. 2. Baselines are generated for the same codebase snapshot. This means that we won't have cases where secrets are moved around; only added or removed. NOTE: We don't want to do a version check, because we want to be able to use this functionality across versions (to see how the new version fares compared to the old one).
19,889
def get_tablenames(cur):
    cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
    tablename_list_ = cur.fetchall()
    tablename_list = [str(tablename[0]) for tablename in tablename_list_]
    return tablename_list
Convenience: return the list of table names in a SQLite database.
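A minimal usage sketch with the standard library's sqlite3 module (the table name is illustrative):

import sqlite3

con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute('CREATE TABLE images (id INTEGER PRIMARY KEY, path TEXT)')

cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
print([row[0] for row in cur.fetchall()])   # ['images']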
19,890
def changeLocalUserPassword(self, login, user, password):
    self.send_changeLocalUserPassword(login, user, password)
    self.recv_changeLocalUserPassword()
Parameters: - login - user - password
19,891
def text_fd_to_metric_families(fd): name = None allowed_names = [] eof = False seen_metrics = set() def build_metric(name, documentation, typ, unit, samples): if name in seen_metrics: raise ValueError("Duplicate metric: " + name) seen_metrics.add(name) if typ is None: typ = if documentation is None: documentation = if unit is None: unit = if unit and not name.endswith("_" + unit): raise ValueError("Unit does not match metric name: " + name) if unit and typ in [, ]: raise ValueError("Units not allowed for this metric type: " + name) if typ in [, ]: _check_histogram(samples, name) metric = Metric(name, documentation, typ, unit) metric.samples = samples return metric for line in fd: if line[-1] == : line = line[:-1] if eof: raise ValueError("Received line after if line == : eof = True elif line.startswith(): parts = line.split(, 3) if len(parts) < 4: raise ValueError("Invalid line: " + line) if parts[2] == name and samples: raise ValueError("Received metadata after samples: " + line) if parts[2] != name: if name is not None: yield build_metric(name, documentation, typ, unit, samples) name = parts[2] unit = None typ = None documentation = None group = None seen_groups = set() group_timestamp = None group_timestamp_samples = set() samples = [] allowed_names = [parts[2]] if parts[1] == : if documentation is not None: raise ValueError("More than one HELP for metric: " + line) if len(parts) == 4: documentation = _unescape_help(parts[3]) elif len(parts) == 3: raise ValueError("Invalid line: " + line) elif parts[1] == : if typ is not None: raise ValueError("More than one TYPE for metric: " + line) typ = parts[3] if typ == : raise ValueError("Invalid TYPE for metric: " + line) allowed_names = { : [, ], : [, , , ], : [, , , ], : [, , ], : [], }.get(typ, []) allowed_names = [name + n for n in allowed_names] elif parts[1] == : if unit is not None: raise ValueError("More than one UNIT for metric: " + line) unit = parts[3] else: raise ValueError("Invalid line: " + line) else: sample = _parse_sample(line) if sample.name not in allowed_names: if name is not None: yield build_metric(name, documentation, typ, unit, samples) name = sample.name documentation = None unit = None typ = samples = [] group = None group_timestamp = None group_timestamp_samples = set() seen_groups = set() allowed_names = [sample.name] if typ == and name not in sample.labels: raise ValueError("Stateset missing label: " + line) if (typ in [, ] and name + == sample.name and (float(sample.labels.get(, -1)) < 0 or sample.labels[] != floatToGoString(sample.labels[]))): raise ValueError("Invalid le label: " + line) if (typ == and name == sample.name and (not (0 <= float(sample.labels.get(, -1)) <= 1) or sample.labels[] != floatToGoString(sample.labels[]))): raise ValueError("Invalid quantile label: " + line) g = tuple(sorted(_group_for_sample(sample, name, typ).items())) if group is not None and g != group and g in seen_groups: raise ValueError("Invalid metric grouping: " + line) if group is not None and g == group: if (sample.timestamp is None) != (group_timestamp is None): raise ValueError("Mix of timestamp presence within a group: " + line) if group_timestamp is not None and group_timestamp > sample.timestamp and typ != : raise ValueError("Timestamps went backwards within a group: " + line) else: group_timestamp_samples = set() series_id = (sample.name, tuple(sorted(sample.labels.items()))) if sample.timestamp != group_timestamp or series_id not in group_timestamp_samples: samples.append(sample) group_timestamp_samples.add(series_id) 
group = g group_timestamp = sample.timestamp seen_groups.add(g) if typ == and sample.value not in [0, 1]: raise ValueError("Stateset samples can only have values zero and one: " + line) if typ == and sample.value != 1: raise ValueError("Info samples can only have value one: " + line) if typ == and name == sample.name and sample.value < 0: raise ValueError("Quantile values cannot be negative: " + line) if sample.name[len(name):] in [, , , , , ] and math.isnan( sample.value): raise ValueError("Counter-like samples cannot be NaN: " + line) if sample.name[len(name):] in [, , , , , ] and sample.value < 0: raise ValueError("Counter-like samples cannot be negative: " + line) if sample.exemplar and not ( typ in [, ] and sample.name.endswith()): raise ValueError("Invalid line only histogram/gaugehistogram buckets can have exemplars: " + line) if name is not None: yield build_metric(name, documentation, typ, unit, samples) if not eof: raise ValueError("Missing
Parse Prometheus text format from a file descriptor. This is a laxer parser than the main Go parser, so successful parsing does not imply that the parsed text meets the specification. Yields Metric's.
19,892
def schedule_host_check(self, host, check_time):
    host.schedule(self.daemon.hosts, self.daemon.services,
                  self.daemon.timeperiods, self.daemon.macromodulations,
                  self.daemon.checkmodulations, self.daemon.checks,
                  force=False, force_time=check_time)
    self.send_an_element(host.get_update_status_brok())
Schedule a check on a host Format of the line that triggers function call:: SCHEDULE_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None
19,893
def delete_quick(self, get_count=False):
    # The stripped literal is the SQL verb: 'DELETE FROM '.
    query = 'DELETE FROM ' + self.full_table_name + self.where_clause
    self.connection.query(query)
    count = self.connection.query("SELECT ROW_COUNT()").fetchone()[0] if get_count else None
    self._log(query[:255])
    return count
Deletes the table without cascading and without user prompt. If this table has populated dependent tables, this will fail.
19,894
def convert_coord(coord_from, matrix_file, base_to_aligned=True):
    # The join separator, the comment character ('#') and the notify messages
    # were lost; the texts below are reconstructions.
    with open(matrix_file) as f:
        try:
            values = [float(y) for y in
                      ' '.join([x for x in f.readlines()
                                if x.strip()[0] != '#']).strip().split()]
        except:
            nl.notify('Error reading values from matrix file %s' % matrix_file,
                      level=nl.level.error)
            return False
    if len(values) != 12:
        nl.notify('Expected 12 values, found %d in matrix file %s'
                  % (len(values), matrix_file), level=nl.level.error)
        return False
    matrix = np.vstack((np.array(values).reshape((3, -1)), [0, 0, 0, 1]))
    if not base_to_aligned:
        matrix = np.linalg.inv(matrix)
    return np.dot(matrix, list(coord_from) + [1])[:3]
Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``
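The homogeneous-coordinate step can be checked in isolation; a small sketch with an identity-plus-translation matrix (the values are made up):

import numpy as np

# 3x4 affine (rotation/scale | translation), given as 12 values row-major
values = [1, 0, 0, 5,
          0, 1, 0, -2,
          0, 0, 1, 0]
matrix = np.vstack((np.array(values, dtype=float).reshape((3, -1)), [0, 0, 0, 1]))

coord = [10.0, 20.0, 30.0]
print(np.dot(matrix, coord + [1])[:3])   # [15. 18. 30.]

# Inverting the 4x4 matrix gives the aligned -> base direction back
print(np.dot(np.linalg.inv(matrix), [15.0, 18.0, 30.0, 1.0])[:3])   # [10. 20. 30.]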
19,895
def query(self, query_dict: Dict[str, Any]) -> None:
    self.parse_url.query = cast(Any, query_dict)
Override the query setter.
19,896
async def list(self, *, filters: Mapping = None) -> List[Mapping]:
    params = {"filters": clean_filters(filters)}
    response = await self.docker._query_json(
        "services", method="GET", params=params
    )
    return response
Return a list of services Args: filters: a dict with a list of filters Available filters: id=<service id> label=<service label> mode=["replicated"|"global"] name=<service name>
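A hedged usage sketch, assuming an aiodocker-style client object named `docker` connected to a swarm manager; the label value is an example:

import asyncio

async def show_replicated_services(docker):
    # Only services running in replicated mode that carry a given label.
    services = await docker.services.list(
        filters={"mode": ["replicated"], "label": ["com.example.tier=web"]}
    )
    for svc in services:
        print(svc["ID"], svc["Spec"]["Name"])

# asyncio.run(show_replicated_services(docker))   # with a connected client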
19,897
def create_project(type, schema, server, name, output, verbose):
    if verbose:
        log("Entity Matching Server: {}".format(server))
    if schema is not None:
        schema_json = json.load(schema)
        clkhash.schema.validate_schema_dict(schema_json)
    else:
        raise ValueError("Schema must be provided when creating new linkage project")
    # The stripped default is assumed to be an empty string.
    name = name if name is not None else ''
    try:
        project_creation_reply = project_create(server, schema_json, type, name)
    except ServiceError as e:
        log("Unexpected response - {}".format(e.status_code))
        log(e.text)
        raise SystemExit
    else:
        log("Project created")
    json.dump(project_creation_reply, output)
Create a new project on an entity matching server. See the entity matching service documentation for details on mapping type and schema. Returns authentication details for the created project.
19,898
def create_weather(self, **kwargs): weather = predix.admin.weather.WeatherForecast(**kwargs) weather.create() client_id = self.get_client_id() if client_id: weather.grant_client(client_id) weather.grant_client(client_id) weather.add_to_manifest(self) return weather
Creates an instance of the Weather Forecast service and grants the configured client access to it.
19,899
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
    logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)

    target_file = open(target_path, "wb")
    file_obj.Seek(0)
    count = 0

    data_buffer = file_obj.Read(buffer_size)
    while data_buffer:
        target_file.write(data_buffer)
        data_buffer = file_obj.Read(buffer_size)
        count += 1
        if not count % 3:
            logging.debug(u"Downloading: %s: %s done", file_obj.urn,
                          utils.FormatNumberAsString(count * buffer_size))
    target_file.close()
Download an aff4 file to the local filesystem overwriting it if it exists. Args: file_obj: An aff4 object that supports the file interface (Read, Seek) target_path: Full path of file to write to. buffer_size: Read in chunks this size.
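The same chunked read/write loop, shown standalone with ordinary file objects; the paths are illustrative:

BUFFER_SIZE = 64 * 1024

def copy_in_chunks(src_path, dst_path, buffer_size=BUFFER_SIZE):
    # Stream the source in fixed-size chunks so memory use stays bounded.
    with open(src_path, 'rb') as src, open(dst_path, 'wb') as dst:
        chunk = src.read(buffer_size)
        while chunk:
            dst.write(chunk)
            chunk = src.read(buffer_size)

# copy_in_chunks('/tmp/in.bin', '/tmp/out.bin')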