Columns: Unnamed: 0 (int64, 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k)
12,600
def fetch_blob(cls, username, password, multifactor_password=None, client_id=None): session = fetcher.login(username, password, multifactor_password, client_id) blob = fetcher.fetch(session) fetcher.logout(session) return blob
Just fetches the blob, could be used to store it locally
12,601
def create_gre_tunnel_no_encryption(cls, name, local_endpoint, remote_endpoint, mtu=0, pmtu_discovery=True, ttl=0, enabled=True, comment=None): return cls.create_gre_tunnel_mode( name, local_endpoint, remote_endpoint, policy_vpn=None, mtu=mtu, pmtu_discovery=pmtu_discovery, ttl=ttl, enabled=enabled, comment=comment)
Create a GRE Tunnel with no encryption. See `create_gre_tunnel_mode` for constructor descriptions.
12,602
def _prune_penalty_box(self): added = False for client in self.penalty_box.get(): log.info("Client %r is back up.", client) self.active_clients.append(client) added = True if added: self._sort_clients()
Restores clients that have reconnected. This function should be called first for every public method.
12,603
def get_signature_candidate(lines): non_empty = [i for i, line in enumerate(lines) if line.strip()] if len(non_empty) <= 1: return [] candidate = non_empty[1:] candidate = candidate[-SIGNATURE_MAX_LINES:] markers = _mark_candidate_indexes(lines, candidate) candidate = _process_marked_candidate_indexes(candidate, markers) if candidate: candidate = lines[candidate[0]:] return candidate return []
Return lines that could hold a signature. The lines should: * be among the last SIGNATURE_MAX_LINES non-empty lines * not include the first line * be shorter than TOO_LONG_SIGNATURE_LINE * not include more than one line that starts with dashes
12,604
def check_node_parent( self, resource_id, new_parent_id, db_session=None, *args, **kwargs ): return self.service.check_node_parent( resource_id=resource_id, new_parent_id=new_parent_id, db_session=db_session, *args, **kwargs )
Checks if parent destination is valid for node :param resource_id: :param new_parent_id: :param db_session: :return:
12,605
def get_subclass_tree(cls, ensure_unique=True): subclasses = [] for subcls in type.__subclasses__(cls): subclasses.append(subcls) subclasses.extend(get_subclass_tree(subcls, ensure_unique)) return list(set(subclasses)) if ensure_unique else subclasses
Returns all subclasses (direct and recursive) of cls.
12,606
def get_json_response_object(self, datatable): datatable.populate_records() draw = getattr(self.request, self.request.method).get(, None) if draw is not None: draw = escape_uri_path(draw) response_data = { : draw, : datatable.unpaged_record_count, : datatable.total_initial_record_count, : [dict(record, **{ : record.pop(), : record.pop(), }) for record in datatable.get_records()], } return response_data
Returns the JSON-compatible dictionary that will be serialized for an AJAX response. The value names are in the form "s~" for strings, "i~" for integers, and "a~" for arrays, if you're unfamiliar with the old C-style jargon used in dataTables.js. "aa~" means "array of arrays". In some instances, the author uses "ao~" for "array of objects", an object being a javascript dictionary.
12,607
def get_callback_function(setting_name, default=None): func = getattr(settings, setting_name, None) if not func: return default if callable(func): return func if isinstance(func, str): func = import_string(func) if not callable(func): raise ImproperlyConfigured("{name} must be callable.".format(name=setting_name)) return func
Resolve a callback function based on a setting name. If the setting value isn't set, default is returned. If the setting value is already a callable function, that value is used - If the setting value is a string, an attempt is made to import it. Anything else will result in a failed import causing ImportError to be raised. :param setting_name: The name of the setting to resolve a callback from. :type setting_name: string (``str``/``unicode``) :param default: The default to return if setting isn't populated. :type default: ``bool`` :returns: The resolved callback function (if any). :type: ``callable``
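A minimal usage sketch of the resolver above; the setting name and values below are hypothetical and only illustrate the two supported kinds of setting value (an actual callable or a dotted import string).

    # settings.INVOICE_CALLBACK is assumed to hold either a callable or a
    # dotted-path string such as "billing.utils.make_number" (made-up names).
    callback = get_callback_function("INVOICE_CALLBACK", default=lambda: None)
    result = callback()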
12,608
def add_load(self, lv_load): if lv_load not in self._loads and isinstance(lv_load, LVLoadDing0): self._loads.append(lv_load) self.graph_add_node(lv_load)
Adds an LV load to _loads and grid graph if not already existing Parameters ---------- lv_load : Description #TODO
12,609
def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison: def normalise(param: Union[str, pathlib.Path, _Entity]) -> _Entity: if isinstance(param, str): param = pathlib.Path(param) if isinstance(param, pathlib.Path): param = _Entity.from_path(param) return param return Comparison.compare(normalise(left), normalise(right))
Compare two paths. :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: A comparison detailing what has changed from the left side to the right side.
12,610
def _split_input_slice(batch_size, work_load_list): total_work_load = sum(work_load_list) batch_num_list = [round(work_load * batch_size / total_work_load) for work_load in work_load_list] batch_num_sum = sum(batch_num_list) if batch_num_sum < batch_size: batch_num_list[-1] += batch_size - batch_num_sum slices = [] end = 0 for batch_num in batch_num_list: begin = int(min((end, batch_size))) end = int(min((begin + batch_num, batch_size))) if begin >= end: raise ValueError() slices.append(slice(begin, end)) return slices
Get input slice from the input shape. Parameters ---------- batch_size : int The number of samples in a mini-batch. work_load_list : list of float or int, optional The list of work load for different devices, in the same order as `ctx`. Returns ------- slices : list of slice The split slices to get a specific slice. Raises ------ ValueError In case of too many splits, leading to some empty slices.
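A brief usage sketch of the slicing helper above, with made-up numbers: a mini-batch of 10 samples split across two devices with a 2:1 work load.

    slices = _split_input_slice(batch_size=10, work_load_list=[2, 1])
    # round(2 * 10 / 3) = 7 and round(1 * 10 / 3) = 3, so the result is
    # [slice(0, 7), slice(7, 10)]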
12,611
def get_column_at_index(self, index): if index is None: return None url = self.build_url(self._endpoints.get()) response = self.session.post(url, data={: index}) if not response: return None return self.column_constructor(parent=self, **{self._cloud_data_key: response.json()})
Returns a table column by its index :param int index: the zero-indexed position of the column in the table
12,612
def upload_file(request): if request.method == : form = MediaForm(request.POST, request.FILES) if form.is_valid(): context_dict = {} try: context_dict[] = update_media_file( request.FILES[]) except Exception as e: context_dict[] = e.message return render(request, , context_dict) else: form = MediaForm() return render(request, , {: form})
Upload a zip file containing a single media file.
12,613
def standard_block(self, bytes_): self.out(self.LH(len(bytes_) + 1)) checksum = 0 for i in bytes_: checksum ^= (int(i) & 0xFF) self.out(i) self.out(checksum)
Adds a standard block of bytes. For TAP files, it's just the Low + Hi byte plus the content (here, the bytes plus the checksum)
12,614
def apply_plugin_settings(self, options): color_scheme_n = color_scheme_o = self.get_color_scheme() font_n = font_o = self.get_plugin_font() wrap_n = wrap_o = self.get_option(wrap_n) self.wrap_action.setChecked(wrap_o) linenb_n = linenb_o = self.get_option(linenb_n) for editor in self.editors: if font_n in options: scs = color_scheme_o if color_scheme_n in options else None editor.set_font(font_o, scs) elif color_scheme_n in options: editor.set_color_scheme(color_scheme_o) if wrap_n in options: editor.toggle_wrap_mode(wrap_o) if linenb_n in options: editor.toggle_line_numbers(linenumbers=linenb_o, markers=False)
Apply configuration file's plugin settings
12,615
def _allowAnotherAt(cls, parent): site = parent.get_site() if site is None: return False return not cls.peers().descendant_of(site.root_page).exists()
You can only create one of these pages per site.
12,616
def wheel_dist_name(self): components = (safer_name(self.distribution.get_name()), safer_version(self.distribution.get_version())) if self.build_number: components += (self.build_number,) return .join(components)
Return distribution full name with - replaced with _
12,617
def cPrint(self, level, message, *args, **kw): if level > self.consolePrinterVerbosity: return if len(kw) > 1: raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys())) newline = kw.get("newline", True) if len(kw) == 1 and "newline" not in kw: raise KeyError("Invalid keyword for cPrint: %s" % kw.keys()[0]) if len(args) == 0: if newline: print message else: print message, else: if newline: print message % args else: print message % args,
Print a message to the console. Prints only if level <= self.consolePrinterVerbosity Printing with level 0 is equivalent to using a print statement, and should normally be avoided. :param level: (int) indicating the urgency of the message with lower values meaning more urgent (messages at level 0 are the most urgent and are always printed) :param message: (string) possibly with format specifiers :param args: specifies the values for any format specifiers in message :param kw: newline is the only keyword argument. True (default) if a newline should be printed
12,618
def get_filtered_register_graph(register_uri, g): import requests from pyldapi.exceptions import ViewsFormatsException assert isinstance(g, Graph) logging.debug( + register_uri.replace(, )) try: r = requests.get(register_uri) print( + register_uri) except ViewsFormatsException as e: return False if r.status_code == 200: return _filter_register_graph(register_uri.replace(, ), r, g) logging.debug(.format(register_uri)) return False
Gets a filtered version (label, comment, contained item classes & subregisters only) of each register for the Register of Registers :param register_uri: the public URI of the register :type register_uri: string :param g: the rdf graph to append registers to :type g: Graph :return: True if ok, else False :rtype: boolean
12,619
def _combine(self, applied, shortcut=False): applied_example, applied = peek_at(applied) coord, dim, positions = self._infer_concat_args(applied_example) if shortcut: combined = self._concat_shortcut(applied, dim, positions) else: combined = concat(applied, dim) combined = _maybe_reorder(combined, dim, positions) if isinstance(combined, type(self._obj)): combined = self._restore_dim_order(combined) if coord is not None: if shortcut: combined._coords[coord.name] = as_variable(coord) else: combined.coords[coord.name] = coord combined = self._maybe_restore_empty_groups(combined) combined = self._maybe_unstack(combined) return combined
Recombine the applied objects like the original.
12,620
def state_province_region(self, value=None): if value is not None: try: value = str(value) except ValueError: raise ValueError( .format(value)) if in value: raise ValueError( ) self._state_province_region = value
Corresponds to IDD Field `state_province_region` Args: value (str): value for IDD Field `state_province_region` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
12,621
def reboot_node(node_id, profile, **libcloud_kwargs): conn = _get_driver(profile=profile) node = _get_by_id(conn.list_nodes(**libcloud_kwargs), node_id) return conn.reboot_node(node, **libcloud_kwargs)
Reboot a node in the cloud :param node_id: Unique ID of the node to reboot :type node_id: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's reboot_node method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.reboot_node as-2346 profile1
12,622
def _request(self, method, uri, headers={}, body=, stream=False): response = None headers.setdefault(, ) if self._client._credentials: self._security_auth_headers(self._client._credentials.username, self._client._credentials.password, headers) try: self._connection.request(method, uri, body, headers) try: response = self._connection.getresponse(buffering=True) except TypeError: response = self._connection.getresponse() if stream: response_body = response else: response_body = response.read() finally: if response and not stream: response.close() return response.status, response.msg, response_body
Given a Method, URL, Headers, and Body, perform an HTTP request, and return a 3-tuple containing the response status, response headers (as httplib.HTTPMessage), and response body.
12,623
def create_endpoint_folder(self, endpoint_id, folder): try: res = self.transfer_client.operation_mkdir(endpoint_id, folder) bot.info("%s --> %s" %(res[], folder)) except TransferAPIError: bot.info( %folder)
create an endpoint folder, catching the error if it exists. Parameters ========== endpoint_id: the endpoint id parameters folder: the relative path of the folder to create
12,624
def p_union_patch(self, p): p[0] = AstUnionPatch( path=self.path, lineno=p[2][1], lexpos=p[2][2], name=p[3], fields=p[6], examples=p[7], closed=p[2][0] == )
union_patch : PATCH uniont ID NL INDENT field_list examples DEDENT
12,625
def refresh(self, data): modules = data.get("module") update_i3status = False for module_name in self.find_modules(modules): module = self.py3_wrapper.output_modules[module_name] if self.debug: self.py3_wrapper.log("refresh %s" % module) if module["type"] == "py3status": module["module"].force_update() else: update_i3status = True if update_i3status: self.py3_wrapper.i3status_thread.refresh_i3status()
refresh the module(s)
12,626
def getCachedOrUpdatedValue(self, key): try: return self._VALUES[key] except KeyError: return self.getValue(key)
Gets the device's value with the given key. If the key is not found in the cache, the value is queried from the host.
12,627
def add_attribute(self, tag, name, value): self.add_tag(tag) d = self._tags[tag] d[name] = value
add an attribute (name, value pair) to the named tag
12,628
def plot_world(*args, **kwargs): interactive = kwargs.pop('interactive', True) if interactive: plot_world_with_elegans(*args, **kwargs) else: plot_world_with_matplotlib(*args, **kwargs)
Generate a plot from received instance of World and show it. See also plot_world_with_elegans and plot_world_with_matplotlib. Parameters ---------- world : World or str World or a HDF5 filename to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_world(w) >>> plot_world(w, interactive=False)
12,629
def _get_tick_frac_labels(self): minor_num = 4 if (self.axis.scale_type == ): domain = self.axis.domain if domain[1] < domain[0]: flip = True domain = domain[::-1] else: flip = False offset = domain[0] scale = domain[1] - domain[0] transforms = self.axis.transforms length = self.axis.pos[1] - self.axis.pos[0] n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2) labels = [ % x for x in major] majstep = major[1] - major[0] minor = [] minstep = majstep / (minor_num + 1) minstart = 0 if self.axis._stop_at_major[0] else -1 minstop = -1 if self.axis._stop_at_major[1] else 0 for i in range(minstart, len(major) + minstop): maj = major[0] + i * majstep minor.extend(np.linspace(maj + minstep, maj + majstep - minstep, minor_num)) major_frac = (major - offset) / scale minor_frac = (np.array(minor) - offset) / scale major_frac = major_frac[::-1] if flip else major_frac use_mask = (major_frac > -0.0001) & (major_frac < 1.0001) major_frac = major_frac[use_mask] labels = [l for li, l in enumerate(labels) if use_mask[li]] minor_frac = minor_frac[(minor_frac > -0.0001) & (minor_frac < 1.0001)] elif self.axis.scale_type == : return NotImplementedError elif self.axis.scale_type == : return NotImplementedError return major_frac, minor_frac, labels
Get the major ticks, minor ticks, and major labels
12,630
def make_ns(self, ns): if self.namespace: val = {} val.update(self.namespace) val.update(ns) return val else: return ns
Returns the `lazily` created template namespace.
12,631
def user(self): try: return self._user except AttributeError: self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid)) return self._user
Creates a User object when requested.
12,632
def smoother_step(F, filt, next_pred, next_smth): J = dotdot(filt.cov, F.T, inv(next_pred.cov)) smth_cov = filt.cov + dotdot(J, next_smth.cov - next_pred.cov, J.T) smth_mean = filt.mean + np.matmul(next_smth.mean - next_pred.mean, J.T) return MeanAndCov(mean=smth_mean, cov=smth_cov)
Smoothing step of Kalman filter/smoother. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} filt: MeanAndCov object filtering distribution at time t next_pred: MeanAndCov object predictive distribution at time t+1 next_smth: MeanAndCov object smoothing distribution at time t+1 Returns ------- smth: MeanAndCov object smoothing distribution at time t
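A small numeric sketch of the Rauch-Tung-Striebel step above (scalar case, made-up numbers); MeanAndCov is assumed to be a simple (mean, cov) container as in the surrounding module.

    import numpy as np
    from collections import namedtuple

    MeanAndCov = namedtuple('MeanAndCov', 'mean cov')
    F = np.eye(1)
    filt = MeanAndCov(mean=np.array([[0.5]]), cov=np.array([[1.0]]))
    next_pred = MeanAndCov(mean=np.array([[0.5]]), cov=np.array([[2.0]]))
    next_smth = MeanAndCov(mean=np.array([[0.8]]), cov=np.array([[1.5]]))
    # gain J = cov_filt * F.T * inv(cov_pred) = 1.0 * 1.0 * 0.5 = 0.5
    # smth_cov  = 1.0 + 0.5 * (1.5 - 2.0) * 0.5 = 0.875
    # smth_mean = 0.5 + (0.8 - 0.5) * 0.5 = 0.65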
12,633
def execute(self): stack = self._stack callbacks = self._callbacks promises = [] if stack: def process(): pipe = ConnectionManager.get(self.connection_name) call_stack = [] futures = [] for f, args, kwargs in call_stack: f(*args, **kwargs) for i, v in enumerate(pipe.execute()): futures[i].set(v) promises.append(process) promises += [p.execute for p in self._pipelines.values()] if len(promises) == 1: promises[0]() else: TaskManager.wait(*[TaskManager.promise(p) for p in promises]) for cb in callbacks: cb()
Invoke the redispy pipeline.execute() method and take all the values returned in sequential order of commands and map them to the Future objects we returned when each command was queued inside the pipeline. Also invoke all the callback functions queued up. :param raise_on_error: boolean :return: None
12,634
def api_request( self, method, path, query_params=None, data=None, content_type=None, headers=None, api_base_url=None, api_version=None, expect_json=True, _target_object=None, ): url = self.build_api_url( path=path, query_params=query_params, api_base_url=api_base_url, api_version=api_version, ) if data and isinstance(data, dict): data = json.dumps(data) content_type = "application/json" response = self._make_request( method=method, url=url, data=data, content_type=content_type, headers=headers, target_object=_target_object, ) if not 200 <= response.status_code < 300: raise exceptions.from_http_response(response) if expect_json and response.content: return response.json() else: return response.content
Make a request over the HTTP transport to the API. You shouldn't need to use this method, but if you plan to interact with the API using these primitives, this is the correct one to use. :type method: str :param method: The HTTP method name (ie, ``GET``, ``POST``, etc). Required. :type path: str :param path: The path to the resource (ie, ``'/b/bucket-name'``). Required. :type query_params: dict or list :param query_params: A dictionary of keys and values (or list of key-value pairs) to insert into the query string of the URL. :type data: str :param data: The data to send as the body of the request. Default is the empty string. :type content_type: str :param content_type: The proper MIME type of the data provided. Default is None. :type headers: dict :param headers: extra HTTP headers to be sent with the request. :type api_base_url: str :param api_base_url: The base URL for the API endpoint. Typically you won't have to provide this. Default is the standard API base URL. :type api_version: str :param api_version: The version of the API to call. Typically you shouldn't provide this and instead use the default for the library. Default is the latest API version supported by google-cloud-python. :type expect_json: bool :param expect_json: If True, this method will try to parse the response as JSON and raise an exception if that cannot be done. Default is True. :type _target_object: :class:`object` :param _target_object: (Optional) Protected argument to be used by library callers. This can allow custom behavior, for example, to defer an HTTP request and complete initialization of the object at a later time. :raises ~google.cloud.exceptions.GoogleCloudError: if the response code is not 200 OK. :raises ValueError: if the response content type is not JSON. :rtype: dict or str :returns: The API response payload, either as a raw string or a dictionary if the response is valid JSON.
12,635
def create_constants(self, rdbms):
Factory for creating a Constants objects (i.e. objects for creating constants based on column widths, and auto increment columns and labels). :param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql). :rtype: pystratum.Constants.Constants
12,636
def reload_cache_config(self, call_params): path = + self.api_version + method = return self.request(path, method, call_params)
REST Reload Plivo Cache Config helper
12,637
def get_window_settings(self): window_size = (self.window_size.width(), self.window_size.height()) is_fullscreen = self.isFullScreen() if is_fullscreen: is_maximized = self.maximized_flag else: is_maximized = self.isMaximized() pos = (self.window_position.x(), self.window_position.y()) prefs_dialog_size = (self.prefs_dialog_size.width(), self.prefs_dialog_size.height()) hexstate = qbytearray_to_str(self.saveState()) return (hexstate, window_size, prefs_dialog_size, pos, is_maximized, is_fullscreen)
Return current window settings Symmetric to the 'set_window_settings' setter
12,638
def recv(sock, size): data = sock.recv(size, socket.MSG_WAITALL) if len(data) < size: raise socket.error(ECONNRESET, ) return data
Receives exactly `size` bytes. This function blocks the thread.
12,639
def fork(self, server_address: str = None, *, namespace: str = None) -> "State": if server_address is None: server_address = self.server_address if namespace is None: namespace = self.namespace return self.__class__(server_address, namespace=namespace)
r""" "Forks" this State object. Takes the same args as the :py:class:`State` constructor, except that they automatically default to the values provided during the creation of this State object. If no args are provided to this function, then it shall create a new :py:class:`State` object that follows the exact same semantics as this one. This is preferred over ``copy()``\ -ing a :py:class:`State` object. Useful when one needs to access 2 or more namespaces from the same code.
12,640
def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None): def decorate(function): if not seconds: return function if use_signals: def handler(signum, frame): _raise_exception(timeout_exception, exception_message) @wraps(function) def new_function(*args, **kwargs): new_seconds = kwargs.pop(, seconds) if new_seconds: old = signal.signal(signal.SIGALRM, handler) signal.setitimer(signal.ITIMER_REAL, new_seconds) try: return function(*args, **kwargs) finally: if new_seconds: signal.setitimer(signal.ITIMER_REAL, 0) signal.signal(signal.SIGALRM, old) return new_function else: @wraps(function) def new_function(*args, **kwargs): timeout_wrapper = _Timeout(function, timeout_exception, exception_message, seconds) return timeout_wrapper(*args, **kwargs) return new_function return decorate
Add a timeout parameter to a function and return it. :param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied. This adds some flexibility to the usage: you can disable timing out depending on the settings. :type seconds: float :param use_signals: flag indicating whether signals should be used for timing the function out, or the multiprocessing module. When using multiprocessing, timeout granularity is limited to 10ths of a second. :type use_signals: bool :raises: TimeoutError if time limit is reached It is illegal to pass anything other than a function as the first parameter. The function is wrapped and returned to the caller.
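A short usage sketch of the decorator above; the decorated functions and the 2-second limit are made up, and the name of the per-call override keyword popped from kwargs is elided in the code shown here.

    import time

    @timeout(seconds=2.0)
    def slow_query():
        time.sleep(5)        # raises TimeoutError after roughly 2 seconds

    @timeout(seconds=None)   # passing None disables the limit entirely
    def unbounded_query():
        pass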
12,641
def process_rst_and_summaries(content_generators): for generator in content_generators: if isinstance(generator, generators.ArticlesGenerator): for article in ( generator.articles + generator.translations + generator.drafts): rst_add_mathjax(article) if process_summary.mathjax_script is not None: process_summary(article) elif isinstance(generator, generators.PagesGenerator): for page in generator.pages: rst_add_mathjax(page) for page in generator.hidden_pages: rst_add_mathjax(page)
Ensure mathjax script is applied to RST and summaries are corrected if specified in user settings. Handles content attached to ArticleGenerator and PageGenerator objects, since the plugin doesn't know how to handle other Generator types. For reStructuredText content, examine both articles and pages. If article or page is reStructuredText and there is math present, append the mathjax script. Also process summaries if present (only applies to articles) and user wants summaries processed (via user settings)
12,642
def parse_epsv_response(s): matches = tuple(re.finditer(r"\((.)\1\1\d+\1\)", s)) s = matches[-1].group() port = int(s[4:-2]) return None, port
Parsing `EPSV` (`message (|||port|)`) response. :param s: response line :type s: :py:class:`str` :return: (ip, port) :rtype: (:py:class:`None`, :py:class:`int`)
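An illustrative call to the parser above with a typical EPSV reply (the port number is made up); the regex keeps the last "(|||port|)" group and the slice extracts the digits.

    line = "229 Entering Extended Passive Mode (|||43251|)"
    assert parse_epsv_response(line) == (None, 43251)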
12,643
def predict(self, X): return [self.classes[prediction.argmax()] for prediction in self.predict_proba(X)]
Predict the class for X. The predicted class for each sample in X is returned. Parameters ---------- X : List of ndarrays, one for each training example. Each training example's shape is (string1_len, string2_len, n_features), where string1_len and string2_len are the length of the two training strings and n_features the number of features. Returns ------- y : iterable of shape = [n_samples] The predicted classes.
12,644
def regions(self): url = "%s/regions" % self.root params = {"f": "json"} return self._get(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
gets the regions value
12,645
def output_to_json(sources): results = OrderedDict() for source in sources: if source.get_is_available(): source.update() source_name = source.get_source_name() results[source_name] = source.get_sensors_summary() print(json.dumps(results, indent=4)) sys.exit()
Print statistics to the terminal in JSON format
12,646
def minimumBelow(requestContext, seriesList, n): results = [] for series in seriesList: val = safeMin(series) if val is None or val <= n: results.append(series) return results
Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a minimum value below n. Example:: &target=minimumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent at one point less than 1000 packets/min.
12,647
def fromlineno(self): lineno = super(Arguments, self).fromlineno return max(lineno, self.parent.fromlineno or 0)
The first line that this node appears on in the source code. :type: int or None
12,648
def permission_required(perm, *lookup_variables, **kwargs): login_url = kwargs.pop(, settings.LOGIN_URL) redirect_field_name = kwargs.pop(, REDIRECT_FIELD_NAME) redirect_to_login = kwargs.pop(, True) def decorate(view_func): def decorated(request, *args, **kwargs): if request.user.is_authenticated(): params = [] for lookup_variable in lookup_variables: if isinstance(lookup_variable, string_types): value = kwargs.get(lookup_variable, None) if value is None: continue params.append(value) elif isinstance(lookup_variable, (tuple, list)): model, lookup, varname = lookup_variable value = kwargs.get(varname, None) if value is None: continue if isinstance(model, string_types): model_class = apps.get_model(*model.split(".")) else: model_class = model if model_class is None: raise ValueError( "The given argument is not a valid model." % model) if (inspect.isclass(model_class) and not issubclass(model_class, Model)): raise ValueError( % model) obj = get_object_or_404(model_class, **{lookup: value}) params.append(obj) check = get_check(request.user, perm) granted = False if check is not None: granted = check(*params) if granted or request.user.has_perm(perm): return view_func(request, *args, **kwargs) if redirect_to_login: path = urlquote(request.get_full_path()) tup = login_url, redirect_field_name, path return HttpResponseRedirect( % tup) return permission_denied(request) return wraps(view_func)(decorated) return decorate
Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary.
12,649
def update(self, item, dry_run=None): logger.debug(.format( item=item, namespace=self.namespace )) if not dry_run: self.table.put_item(Item=item) return item
Updates item info in file.
12,650
def create_session(self, session_id, register=True, session_factory=None): if session_factory is not None: sess_factory, sess_args, sess_kwargs = session_factory s = sess_factory(*sess_args, **sess_kwargs) else: s = session.Session(self._connection, self, session_id, self.settings.get()) if register: self._sessions.add(s) return s
Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed.
12,651
def is_data_diverging(data_container): assert infer_data_type(data_container) in [ "ordinal", "continuous", ], "Data type should be ordinal or continuous" has_negative = False has_positive = False for i in data_container: if i < 0: has_negative = True elif i > 0: has_positive = True if has_negative and has_positive: return True else: return False
We want to use this to check whether the data are diverging or not. This is a simple check, can be made much more sophisticated. :param data_container: A generic container of data points. :type data_container: `iterable`
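Two illustrative calls to the check above with made-up data, assuming infer_data_type classifies these lists as ordinal or continuous.

    is_data_diverging([-2, -1, 0, 1, 2])   # True: both signs present
    is_data_diverging([0, 1, 2, 3])        # False: no negative values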
12,652
def _fix_up_properties(cls): kind = cls._get_kind() if not isinstance(kind, basestring): raise KindError( % (cls.__name__, kind)) if not isinstance(kind, str): try: kind = kind.encode() except UnicodeEncodeError: raise KindError( % (cls.__name__, kind)) cls._properties = {} if cls.__module__ == __name__: return for name in set(dir(cls)): attr = getattr(cls, name, None) if isinstance(attr, ModelAttribute) and not isinstance(attr, ModelKey): if name.startswith(): raise TypeError( % name) attr._fix_up(cls, name) if isinstance(attr, Property): if (attr._repeated or (isinstance(attr, StructuredProperty) and attr._modelclass._has_repeated)): cls._has_repeated = True cls._properties[attr._name] = attr cls._update_kind_map()
Fix up the properties by calling their _fix_up() method. Note: This is called by MetaModel, but may also be called manually after dynamically updating a model class.
12,653
def analyze(data, normalize=None, reduce=None, ndims=None, align=None, internal=False): return aligner(reducer(normalizer(data, normalize=normalize, internal=internal), reduce=reduce, ndims=ndims, internal=internal), align=align)
Wrapper function for normalize -> reduce -> align transformations. Parameters ---------- data : numpy array, pandas df, or list of arrays/dfs The data to analyze normalize : str or False or None If set to 'across', the columns of the input data will be z-scored across lists (default). That is, the z-scores will be computed with with respect to column n across all arrays passed in the list. If set to 'within', the columns will be z-scored within each list that is passed. If set to 'row', each row of the input data will be z-scored. If set to False, the input data will be returned with no z-scoring. reduce : str or dict Decomposition/manifold learning model to use. Models supported: PCA, IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}. See scikit-learn specific model docs for details on parameters supported for each model. ndims : int Number of dimensions to reduce align : str or dict If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be hyperalignment. If 'SRM', alignment algorithm will be shared response model. You can also pass a dictionary for finer control, where the 'model' key is a string that specifies the model and the params key is a dictionary of parameter values (default : 'hyper'). Returns ---------- analyzed_data : list of numpy arrays The processed data
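A hypothetical usage sketch of the wrapper above with random data: z-score across lists, reduce to 3 PCA dimensions, then hyperalign the two arrays.

    import numpy as np

    data = [np.random.rand(50, 10), np.random.rand(50, 10)]
    aligned = analyze(data, normalize='across', reduce='PCA', ndims=3, align='hyper')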
12,654
def lset(self, key, index, value): redis_list = self._get_list(key, ) if redis_list is None: raise ResponseError("no such key") try: redis_list[index] = self._encode(value) except IndexError: raise ResponseError("index out of range")
Emulate lset.
12,655
def create(self, **kwargs): resource = self.resource_class(self.client) resource.update_from_dict(kwargs) resource.save(force_create=True) return resource
Create a resource on the server :param kwargs: Attributes (field names and values) of the new resource
12,656
def get_stored_content_length(headers): length = headers.get('x-goog-stored-content-length') if length is None: length = headers.get('content-length') return length
Return the content length (in bytes) of the object as stored in GCS. x-goog-stored-content-length should always be present except when called via the local dev_appserver. Therefore if it is not present we default to the standard content-length header. Args: headers: a dict of headers from the http response. Returns: the stored content length.
12,657
def make_key(table_name, objid): key = datastore.Key() path = key.path_element.add() path.kind = table_name path.name = str(objid) return key
Create an object key for storage.
12,658
def main(): ctx = {} def pretty_json(data): return json.dumps(data, indent=2, sort_keys=True) client = server.create_app().test_client() host = res = client.get(, environ_overrides={: host}) res_data = json.loads(res.data.decode()) ctx[] = pretty_json(res_data) res = client.get(, environ_overrides={: host}) ctx[] = pretty_json(json.loads(res.data.decode())) privkey = pubkey = asset = {: } tx = Transaction.create([pubkey], [([pubkey], 1)], asset=asset, metadata={: 0}) tx = tx.sign([privkey]) ctx[] = pretty_json(tx.to_dict()) ctx[] = tx.outputs[0].public_keys[0] ctx[] = tx.id privkey_transfer = pubkey_transfer = cid = 0 input_ = Input(fulfillment=tx.outputs[cid].fulfillment, fulfills=TransactionLink(txid=tx.id, output=cid), owners_before=tx.outputs[cid].public_keys) tx_transfer = Transaction.transfer([input_], [([pubkey_transfer], 1)], asset_id=tx.id, metadata={: 1}) tx_transfer = tx_transfer.sign([privkey]) ctx[] = pretty_json(tx_transfer.to_dict()) ctx[] = tx_transfer.outputs[0].public_keys[0] ctx[] = tx_transfer.id pubkey_transfer_last = cid = 0 input_ = Input(fulfillment=tx_transfer.outputs[cid].fulfillment, fulfills=TransactionLink(txid=tx_transfer.id, output=cid), owners_before=tx_transfer.outputs[cid].public_keys) tx_transfer_last = Transaction.transfer([input_], [([pubkey_transfer_last], 1)], asset_id=tx.id, metadata={: 2}) tx_transfer_last = tx_transfer_last.sign([privkey_transfer]) ctx[] = pretty_json(tx_transfer_last.to_dict()) ctx[] = tx_transfer_last.id ctx[] = tx_transfer_last.outputs[0].public_keys[0] node_private = "5G2kE1zJAgTajkVSbPAQWo4c2izvtwqaNHYsaNpbbvxX" node_public = "DngBurxfeNVKZWCEcDnLj1eMPAS7focUZTE5FndFGuHT" signature = "53wxrEQDYk1dXzmvNSytbCfmNVnPqPkDQaTnAe8Jf43s6ssejPxezkCvUnGTnduNUmaLjhaan1iRLi3peu6s5DzA" app_hash = block = lib.Block(height=1, transactions=[tx.to_dict()], app_hash=app_hash) block_dict = block._asdict() block_dict.pop() ctx[] = pretty_json(block_dict) ctx[] = block.height block_list = [ block.height ] ctx[] = pretty_json(block_list) base_path = os.path.join(os.path.dirname(__file__), ) if not os.path.exists(base_path): os.makedirs(base_path) for name, tpl in TPLS.items(): path = os.path.join(base_path, name + ) code = tpl % ctx with open(path, ) as handle: handle.write(code)
Main function
12,659
def sort(self, values): for level in self: for wire1, wire2 in level: if values[wire1] > values[wire2]: values[wire1], values[wire2] = values[wire2], values[wire1]
Sort the values in-place based on the connectors in the network.
12,660
def plistfilename(self): if self._plist_fname is None: self._plist_fname = discover_filename(self.label) return self._plist_fname
This is a lazily detected absolute filename of the corresponding property list file (*.plist). None if it doesn't exist.
12,661
def _error_if_word_invalid(word, valid_words_dictionary, technical_words_dictionary, line_offset, col_offset): word_lower = word.lower() valid_words_result = valid_words_dictionary.corrections(word_lower) if technical_words_dictionary: technical_words_result = technical_words_dictionary.corrections(word) else: technical_words_result = Dictionary.Result(False, list()) if not valid_words_result.valid and not technical_words_result.valid: return SpellcheckError(word, line_offset, col_offset, valid_words_result.suggestions, SpellcheckError.InvalidWord)
Return SpellcheckError if this non-technical word is invalid.
12,662
def _assert_command_dict(self, struct, name, path=None, extra_info=None): self._assert_dict(struct, name, path, extra_info) if len(struct) != 1: err = [self._format_error_path(path + [name])] err.append( .format(len(struct), struct)) if extra_info: err.append(extra_info) raise exceptions.YamlSyntaxError(.join(err))
Checks whether struct is a command dict (e.g. it's a dict and has 1 key-value pair).
12,663
def create_api(name, description, cloneFrom=None, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if cloneFrom: api = conn.create_rest_api(name=name, description=description, cloneFrom=cloneFrom) else: api = conn.create_rest_api(name=name, description=description) api = _convert_datetime_str(api) return {: True, : api} if api else {: False} except ClientError as e: return {: False, : __utils__[](e)}
Create a new REST API Service with the given name Returns {created: True} if the rest api was created and returns {created: False} if the rest api was not created. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api myapi_name api_description
12,664
def cur_time(typ='date', tz=DEFAULT_TZ) -> (datetime.date, str): dt = pd.Timestamp('now', tz=tz) if typ == 'date': return dt.strftime('%Y-%m-%d') if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S') if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S') if typ == 'raw': return dt return dt.date()
Current time Args: typ: one of ['date', 'time', 'time_path', 'raw', ''] tz: timezone Returns: relevant current time or date Examples: >>> cur_dt = pd.Timestamp('now') >>> cur_time(typ='date') == cur_dt.strftime('%Y-%m-%d') True >>> cur_time(typ='time') == cur_dt.strftime('%Y-%m-%d %H:%M:%S') True >>> cur_time(typ='time_path') == cur_dt.strftime('%Y-%m-%d/%H-%M-%S') True >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp) True >>> cur_time(typ='') == cur_dt.date() True
12,665
def do_refresh(self,args): response = AwsConnectionFactory.getLogClient().describe_log_groups(logGroupNamePrefix=self.stackResource.physical_resource_id) if not in response: raise Exception("Expected log group description to have logGroups entry. Got {}".format(response)) descriptions = [x for x in response[] if x[] == self.stackResource.physical_resource_id] if not descriptions: raise Exception("Could not find log group {} in list {}".format(self.stackResource.physical_resource_id,response[])) self.description = descriptions[0] self.logStreams = self.loadLogStreams() print "== logStream" maxIndex = "{}".format(len(self.logStreams)+1) print "maxIndex:{}".format(maxIndex) frm = " {{0:{}d}}: {{1}}".format(len(maxIndex)) print frm index = 0 for logStream in self.logStreams: print frm.format(index,logStream[]) index += 1
Refresh the view of the log group
12,666
def patch_sys(self, inherit_path): def patch_dict(old_value, new_value): old_value.clear() old_value.update(new_value) def patch_all(path, path_importer_cache, modules): sys.path[:] = path patch_dict(sys.path_importer_cache, path_importer_cache) patch_dict(sys.modules, modules) new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path) new_sys_path.extend(merge_split(self._pex_info.pex_path, self._vars.PEX_PATH)) patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)
Patch sys with all site scrubbed.
12,667
def _attach_record_as_json(mfg_event, record): attachment = mfg_event.attachment.add() attachment.name = TEST_RECORD_ATTACHMENT_NAME test_record_dict = htf_data.convert_to_base_types(record) attachment.value_binary = _convert_object_to_json(test_record_dict) attachment.type = test_runs_pb2.TEXT_UTF8
Attach a copy of the record as JSON so we have an un-mangled copy.
12,668
def filesfile_string(self): lines = [] app = lines.append app(self.input_file.path) app(os.path.join(self.workdir, "unused")) app(os.path.join(self.workdir, self.prefix.odata)) return "\n".join(lines)
String with the list of files and prefixes needed to execute ABINIT.
12,669
def update_stats(stats, start_time, data): end_time = time.time() cmd = data[] try: jid = data[] except KeyError: try: jid = data[][] except KeyError: log.info() return stats create_time = int(time.mktime(time.strptime(jid, ))) latency = start_time - create_time duration = end_time - start_time stats[cmd][] += 1 stats[cmd][] = (stats[cmd][] * (stats[cmd][] - 1) + latency) / stats[cmd][] stats[cmd][] = (stats[cmd][] * (stats[cmd][] - 1) + duration) / stats[cmd][] return stats
Calculate the master stats and return the updated stat info
12,670
def slaveraise(self, type, error, traceback): message = * 1 + pickle.dumps((type, .join(tb.format_exception(type, error, traceback)))) if self.pipe is not None: self.pipe.put(message)
slave only
12,671
def perform_iteration(self): stats = self.get_all_stats() self.redis_client.publish( self.redis_key, jsonify_asdict(stats), )
Get any changes to the log files and push updates to Redis.
12,672
def laplacian(script, iterations=1, boundary=True, cotangent_weight=True, selected=False): filter_xml = .join([ , , .format(iterations), , , , , .format(str(boundary).lower()), , , , , .format(str(cotangent_weight).lower()), , , , , .format(str(selected).lower()), , , , ]) util.write_filter(script, filter_xml) return None
Laplacian smooth of the mesh: for each vertex it calculates the average position of the nearest vertices Args: script: the FilterScript object or script filename to write the filter to. iterations (int): The number of times that the whole algorithm (normal smoothing + vertex fitting) is iterated. boundary (bool): If true the boundary edges are smoothed only by themselves (e.g. the polyline forming the boundary of the mesh is independently smoothed). Can reduce the shrinking on the border but can have strange effects on very small boundaries. cotangent_weight (bool): If True the cotangent weighting scheme is computed for the averaging of the position. Otherwise (False) the simpler umbrella scheme (1 if the edge is present) is used. selected (bool): If selected the filter is performed only on the selected faces Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
12,673
def one_line(self): ret = self.get() if ret is None: return False else: return ret.lower().startswith()
Return True|False if the AMP should be displayed in one line (one_line=true|false).
12,674
def speed(self): if self._stalled: return 0 time_sum = 0 data_len_sum = 0 for time_diff, data_len in self._samples: time_sum += time_diff data_len_sum += data_len if time_sum: return data_len_sum / time_sum else: return 0
Return the current transfer speed. Returns: int: The speed in bytes per second.
12,675
def add_to_manifest(self, manifest): manifest.add_service(self.service.name) varname = predix.config.set_env_value(self.use_class, , self._get_uri()) manifest.add_env_var(varname, self._get_uri()) manifest.write_manifest()
Add useful details to the manifest about this service so that it can be used in an application. :param manifest: An predix.admin.app.Manifest object instance that manages reading/writing manifest config for a cloud foundry app.
12,676
def chunks(iterable, size=1): iterator = iter(iterable) for element in iterator: yield chain([element], islice(iterator, size - 1))
Splits an iterable into chunks.
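Usage sketch for the generator above: each yielded chunk is itself an iterator over the shared source, so consume it before moving on to the next one.

    for chunk in chunks(range(7), size=3):
        print(list(chunk))   # [0, 1, 2], then [3, 4, 5], then [6]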
12,677
def nvmlDeviceGetPcieReplayCounter(handle): c_replay = c_uint() fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieReplayCounter") ret = fn(handle, byref(c_replay)) _nvmlCheckReturn(ret) return bytes_to_str(c_replay.value)
r""" /** * Retrieve the PCIe replay counter. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param value Reference in which to return the counter's value * * @return * - \ref NVML_SUCCESS if \a value has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter
12,678
def parse_san(self, san: str) -> Move: try: if san in ["O-O", "O-O+", "O-O#"]: return next(move for move in self.generate_castling_moves() if self.is_kingside_castling(move)) elif san in ["O-O-O", "O-O-O+", "O-O-O#"]: return next(move for move in self.generate_castling_moves() if self.is_queenside_castling(move)) except StopIteration: raise ValueError("illegal san: {!r} in {}".format(san, self.fen())) match = SAN_REGEX.match(san) if not match: if san in ["--", "Z0"]: return Move.null() raise ValueError("invalid san: {!r}".format(san)) to_square = SQUARE_NAMES.index(match.group(4)) to_mask = BB_SQUARES[to_square] & ~self.occupied_co[self.turn] p = match.group(5) promotion = p and PIECE_SYMBOLS.index(p[-1].lower()) if match.group(1): piece_type = PIECE_SYMBOLS.index(match.group(1).lower()) from_mask = self.pieces_mask(piece_type, self.turn) else: from_mask = self.pawns if match.group(2): from_mask &= BB_FILES[FILE_NAMES.index(match.group(2))] if match.group(3): from_mask &= BB_RANKS[int(match.group(3)) - 1] matched_move = None for move in self.generate_legal_moves(from_mask, to_mask): if move.promotion != promotion: continue if matched_move: raise ValueError("ambiguous san: {!r} in {}".format(san, self.fen())) matched_move = move if not matched_move: raise ValueError("illegal san: {!r} in {}".format(san, self.fen())) return matched_move
Uses the current position as the context to parse a move in standard algebraic notation and returns the corresponding move object. The returned move is guaranteed to be either legal or a null move. :raises: :exc:`ValueError` if the SAN is invalid or ambiguous.
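An illustrative call through the public python-chess API that this method backs; the moves are arbitrary examples.

    import chess

    board = chess.Board()
    move = board.parse_san("Nf3")      # resolves to the move g1f3
    # board.parse_san("Nf7") would raise ValueError("illegal san: ...")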
12,679
def get_separator_words(toks1): tab_toks1 = nltk.FreqDist(word.lower() for word in toks1) if(os.path.isfile(ESSAY_COR_TOKENS_PATH)): toks2 = pickle.load(open(ESSAY_COR_TOKENS_PATH, )) else: essay_corpus = open(ESSAY_CORPUS_PATH).read() essay_corpus = sub_chars(essay_corpus) toks2 = nltk.FreqDist(word.lower() for word in nltk.word_tokenize(essay_corpus)) pickle.dump(toks2, open(ESSAY_COR_TOKENS_PATH, )) sep_words = [] for word in tab_toks1.keys(): tok1_present = tab_toks1[word] if(tok1_present > 2): tok1_total = tab_toks1._N tok2_present = toks2[word] tok2_total = toks2._N fish_val = pvalue(tok1_present, tok2_present, tok1_total, tok2_total).two_tail if(fish_val < .001 and tok1_present / float(tok1_total) > (tok2_present / float(tok2_total)) * 2): sep_words.append(word) sep_words = [w for w in sep_words if not w in nltk.corpus.stopwords.words("english") and len(w) > 5] return sep_words
Finds the words that separate a list of tokens from a background corpus Basically this generates a list of informative/interesting words in a set toks1 is a list of words Returns a list of separator words
12,680
def _soap_client_call(method_name, *args): soap_client = _build_soap_client() soap_args = _convert_soap_method_args(*args) if PYSIMPLESOAP_1_16_2: return getattr(soap_client, method_name)(*soap_args) else: return getattr(soap_client, method_name)(soap_client, *soap_args)
Wrapper to call SoapClient method
12,681
def _configure_registry(self, include_process_stats: bool = False): if include_process_stats: self.registry.register_additional_collector( ProcessCollector(registry=None))
Configure the MetricRegistry.
12,682
def configure(config={}, datastore=None, nested=False): if nested: config = nested_config(config)
Useful for when you need to control Switchboard's setup
12,683
def loads(s, encoding=None, cls=None, object_hook=None, **kw): if cls is None: cls = JSONDecoder if object_hook is not None: kw['object_hook'] = object_hook return cls(encoding=encoding, **kw).decode(s)
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg.
12,684
def emit( self, tup, stream=None, anchors=None, direct_task=None, need_task_ids=False ): if anchors is None: anchors = self._current_tups if self.auto_anchor else [] anchors = [a.id if isinstance(a, Tuple) else a for a in anchors] return super(Bolt, self).emit( tup, stream=stream, anchors=anchors, direct_task=direct_task, need_task_ids=need_task_ids, )
Emit a new Tuple to a stream. :param tup: the Tuple payload to send to Storm, should contain only JSON-serializable data. :type tup: :class:`list` or :class:`pystorm.component.Tuple` :param stream: the ID of the stream to emit this Tuple to. Specify ``None`` to emit to default stream. :type stream: str :param anchors: IDs the Tuples (or :class:`pystorm.component.Tuple` instances) which the emitted Tuples should be anchored to. If ``auto_anchor`` is set to ``True`` and you have not specified ``anchors``, ``anchors`` will be set to the incoming/most recent Tuple ID(s). :type anchors: list :param direct_task: the task to send the Tuple to. :type direct_task: int :param need_task_ids: indicate whether or not you'd like the task IDs the Tuple was emitted (default: ``False``). :type need_task_ids: bool :returns: ``None``, unless ``need_task_ids=True``, in which case it will be a ``list`` of task IDs that the Tuple was sent to if. Note that when specifying direct_task, this will be equal to ``[direct_task]``.
12,685
def _restore_file_lmt(self): if not self._restore_file_properties.lmt or self._ase.lmt is None: return ts = time.mktime(self._ase.lmt.timetuple()) os.utime(str(self.final_path), (ts, ts))
Restore file lmt for file :param Descriptor self: this
12,686
def compare_mim_panels(self, existing_panel, new_panel): existing_genes = set([gene[] for gene in existing_panel[]]) new_genes = set([gene[] for gene in new_panel[]]) return new_genes.difference(existing_genes)
Check if the latest version of OMIM differs from the most recent in database Return all genes that where not in the previous version. Args: existing_panel(dict) new_panel(dict) Returns: new_genes(set(str))
12,687
def cd(path): old_dir = os.getcwd() try: os.makedirs(path) except OSError: pass os.chdir(path) try: yield finally: os.chdir(old_dir)
Creates the path if it doesn't exist
12,688
def service_define(self, service, ty): assert service not in self._data assert service not in self._algebs + self._states self._service.append(service) self._service_ty.append(ty)
Add a service variable of type ``ty`` to this model :param str service: variable name :param type ty: variable type :return: None
12,689
def get_my_ip(): ip = subprocess.check_output(GET_IP_CMD, shell=True).decode()[:-1] return ip.strip()
Returns this computer's IP address as a string.
12,690
def cast_item(cls, item): if not isinstance(item, cls.subtype): incompatible = isinstance(item, Base) and not any( issubclass(cls.subtype, tag_type) and isinstance(item, tag_type) for tag_type in cls.all_tags.values() ) if incompatible: raise IncompatibleItemType(item, cls.subtype) try: return cls.subtype(item) except EndInstantiation: raise ValueError( ) from None except (IncompatibleItemType, CastError): raise except Exception as exc: raise CastError(item, cls.subtype) from exc return item
Cast list item to the appropriate tag type.
12,691
def capture_working_directory(self): workdir = os.path.join(self._path, "tmp", "captures") if not self._deleted: try: os.makedirs(workdir, exist_ok=True) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not create the capture working directory: {}".format(e)) return workdir
Returns a working directory where to temporary store packet capture files. :returns: path to the directory
12,692
def _histogram_fixed_binsize(a, start, width, n): return flib.fixed_binsize(a, start, width, n)
histogram_even(a, start, width, n) -> histogram Return an histogram where the first bin counts the number of lower outliers and the last bin the number of upper outliers. Works only with fixed width bins. :Stochastics: a : array Array of samples. start : float Left-most bin edge. width : float Width of the bins. All bins are considered to have the same width. n : int Number of bins. :Return: H : array Array containing the number of elements in each bin. H[0] is the number of samples smaller than start and H[-1] the number of samples greater than start + n*width.
12,693
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False): result_train = [] result_test = [] y = None for model in self.models: result = model.stack(k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test) train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name)) test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name)) result_train.append(train_df) result_test.append(test_df) if y is None: y = result.y_train result_train = pd.concat(result_train, axis=1) result_test = pd.concat(result_test, axis=1) if add_diff: result_train = feature_combiner(result_train) result_test = feature_combiner(result_test) ds = Dataset(X_train=result_train, y_train=y, X_test=result_test) return ds
Stacks sequence of models. Parameters ---------- k : int, default 5 Number of folds. stratify : bool, default False shuffle : bool, default True seed : int, default 100 full_test : bool, default True If True then evaluate test dataset on the full data otherwise take the mean of every fold. add_diff : bool, default False Returns ------- `DataFrame` Examples -------- >>> pipeline = ModelsPipeline(model_rf,model_lr) >>> stack_ds = pipeline.stack(k=10, seed=111)
12,694
def filter_data(data, filter_dict): for key, match_string in filter_dict.items(): if key not in data: logger.warning("{0} doesn't match a top level key".format(key)) continue values = data[key] matcher = re.compile(match_string) if isinstance(values, list): values = [v for v in values if matcher.search(v)] elif isinstance(values, dict): values = dict((k, v) for k, v in values.items() if matcher.search(k)) else: raise MiuraException("cannot filter a {0}".format(type(values))) data[key] = values
filter a data dictionary for values only matching the filter
12,695
def threadpooled( func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]], *, loop_getter: typing.Union[typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop], loop_getter_need_context: bool = False, ) -> typing.Callable[..., "asyncio.Task[typing.Any]"]:
Overload: function callable, loop getter available.
12,696
def get_events(self, service_location_id, appliance_id, start, end, max_number=None): start = self._to_milliseconds(start) end = self._to_milliseconds(end) url = urljoin(URLS[], service_location_id, "events") headers = {"Authorization": "Bearer {}".format(self.access_token)} params = { "from": start, "to": end, "applianceId": appliance_id, "maxNumber": max_number } r = requests.get(url, headers=headers, params=params) r.raise_for_status() return r.json()
Request events for a given appliance Parameters ---------- service_location_id : int appliance_id : int start : int | dt.datetime | pd.Timestamp end : int | dt.datetime | pd.Timestamp start and end support epoch (in milliseconds), datetime and Pandas Timestamp timezone-naive datetimes are assumed to be in UTC max_number : int, optional The maximum number of events that should be returned by this query Default returns all events in the selected period Returns ------- dict
12,697
def fmt(self): tmpl = string.Template(self.template) kw = {} for key, val in self.kw.items(): if key == : kw[key] = val else: kw[key] = val.fmt() return tmpl.substitute(kw)
Make printable representation out of this instance.
12,698
def find_by_id(self, section, params={}, **options): path = "/sections/%s" % (section) return self.client.get(path, params, **options)
Returns the complete record for a single section. Parameters ---------- section : {Id} The section to get. [params] : {Object} Parameters for the request
12,699
def get_header(self, hdrclass, returnval=None): if isinstance(hdrclass, str): return self.get_header_by_name(hdrclass) for hdr in self._headers: if isinstance(hdr, hdrclass): return hdr return returnval
Return the first header object that is of class hdrclass, or None if the header class isn't found.