Dataset preview columns: Unnamed: 0 (int64, row index 0–389k), code (string, lengths 26–79.6k), docstring (string, lengths 1–46.9k)
def autocorrelation(x, lag): if type(x) is pd.Series: x = x.values if len(x) < lag: return np.nan y1 = x[:(len(x)-lag)] y2 = x[lag:] x_mean = np.mean(x) sum_product = np.sum((y1 - x_mean) * (y2 - x_mean)) v = np.var(x) if np.isclose(v, 0): return np.NaN else: return sum_product / ((len(x) - lag) * v)
Calculates the autocorrelation of the specified lag, according to the formula [1] .. math:: \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu) where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its mean. `l` denotes the lag. .. rubric:: References [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation :param x: the time series to calculate the feature of :type x: numpy.ndarray :param lag: the lag :type lag: int :return: the value of this feature :return type: float
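As an illustrative check of the estimator above (a minimal sketch; only numpy is assumed, and the sample series and lag are arbitrary):

import numpy as np

# Toy series and lag, used only to exercise the formula.
x = np.array([1.0, 2.0, 4.0, 3.0, 5.0, 4.0, 6.0])
lag = 2

# Direct evaluation of 1/((n-l)*sigma^2) * sum_{t=1}^{n-l} (X_t - mu)(X_{t+l} - mu)
mu, var, n = np.mean(x), np.var(x), len(x)
estimate = np.sum((x[:n - lag] - mu) * (x[lag:] - mu)) / ((n - lag) * var)
print(estimate)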
801
def paste_clipboard(self, event): log.critical("paste clipboard") clipboard = self.root.clipboard_get() for line in clipboard.splitlines(): log.critical("paste line: %s", repr(line)) self.add_user_input(line + "\r")
Send the clipboard content as user input to the CPU.
802
def _method_error_handler(self, response: Dict[str, Any]): exp = response.get() code = response.get("CODE") ID = exp.get("ID") raise abort(code, ID=ID, message=exp.get())
Handle 400–499 status codes by setting an exception on the corresponding task. Parameters: (response): - the response data as a Python dict Return: (bool): - strictly speaking, returns True when there is no error
803
def _must_be_deleted(local_path, r_st): if not os.path.lexists(local_path): return True l_st = os.lstat(local_path) if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode): return True return False
Return True if the remote correspondent of local_path has to be deleted, i.e. if it doesn't exist locally or if it has a different type from the remote one.
804
def receive(self): try: buffer = self._socket.recv(BUFFER_SIZE) except socket.timeout as error: buffering = False else: try: more = self._socket.recv(BUFFER_SIZE) except socket.timeout: more = None if not more: buffering = False response = buffer.decode("utf8") else: buffer += more return response
Receive TCP response, looping to get whole thing or timeout.
805
def binarize_signal(signal, treshold="auto", cut="higher"): if treshold == "auto": treshold = (np.max(np.array(signal)) - np.min(np.array(signal)))/2 signal = list(signal) binary_signal = [] for i in range(len(signal)): if cut == "higher": if signal[i] > treshold: binary_signal.append(1) else: binary_signal.append(0) else: if signal[i] < treshold: binary_signal.append(1) else: binary_signal.append(0) return(binary_signal)
Binarize a channel based on a continuous channel. Parameters ---------- signal : array or list The signal channel. treshold : float The treshold value by which to select the events. If "auto", it is set to half of the signal's range, (max - min)/2. cut : str "higher" or "lower", defines the events as above or under the treshold. For photosensors, a white screen usually corresponds to higher values. Therefore, if your events were signalled by a black colour, event values would be the lower ones, and you should set the cut to "lower". Returns ---------- list binary_signal Example ---------- >>> import neurokit as nk >>> binary_signal = nk.binarize_signal(signal, treshold=4) Authors ---------- - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ Dependencies ---------- None
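A small self-contained sketch of the thresholding rule described above (numpy only; in practice the function would be imported from neurokit as in the docstring's example):

import numpy as np

# Photosensor-like trace: low values, with three samples above the "auto" treshold.
signal = [0.1, 0.2, 4.9, 5.1, 0.3, 5.0, 0.2]

treshold = (np.max(signal) - np.min(signal)) / 2   # the "auto" rule, half the range
binary_signal = [1 if value > treshold else 0 for value in signal]   # cut="higher"
print(binary_signal)   # [0, 0, 1, 1, 0, 1, 0]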
806
def _tracked_model_diff(self): initial_state = self._tracked_model_initial_state current_state = serializer.dump_model(self) if current_state == initial_state: return None change_log = {} for field in initial_state: old_value = initial_state[field][Field.VALUE] new_value = current_state[field][Field.VALUE] if old_value == new_value: continue field_data = initial_state.copy()[field] del field_data[Field.VALUE] field_data[Field.OLD] = old_value field_data[Field.NEW] = new_value change_log[field] = field_data return change_log or None
Returns changes made to model instance. Returns None if no changes were made.
807
def wp_status(self): try: print("Have %u of %u waypoints" % (self.wploader.count()+len(self.wp_received), self.wploader.expected_count)) except Exception: print("Have %u waypoints" % (self.wploader.count()+len(self.wp_received)))
show status of wp download
808
def is_rfc2822(instance: str): if not isinstance(instance, str): return True return email.utils.parsedate(instance) is not None
Validates RFC2822 format
809
def stream(self): self._connection.request( , {: [{: self.artist.id, : , : self.id, : 1}], : self._connection.session.queue}, self._connection.header(, )) stream_info = self._connection.request( , {: self.id, : self._connection.session.country, : False, : False}, self._connection.header(, ))[1] return Stream(stream_info[], stream_info[], self._connection)
:class:`Stream` object for playing
810
def format(self, tokensource, outfile): self._create_drawables(tokensource) self._draw_line_numbers() im = Image.new( , self._get_image_size(self.maxcharno, self.maxlineno), self.background_color ) self._paint_line_number_bg(im) draw = ImageDraw.Draw(im) if self.hl_lines: x = self.image_pad + self.line_number_width - self.line_number_pad + 1 recth = self._get_line_height() rectw = im.size[0] - x for linenumber in self.hl_lines: y = self._get_line_y(linenumber - 1) draw.rectangle([(x, y), (x + rectw, y + recth)], fill=self.hl_color) for pos, value, font, kw in self.drawables: draw.text(pos, value, font=font, **kw) im.save(outfile, self.image_format.upper())
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items.
811
def has_frames(self, destination): session = meta.Session() sel = select([model.frames_table.c.message_id]).where( model.frames_table.c.destination == destination) result = session.execute(sel) first = result.fetchone() return first is not None
Whether specified queue has any frames. @param destination: The queue name (destination). @type destination: C{str} @return: Whether there are any frames in the specified queue. @rtype: C{bool}
812
def enableGroup(self): radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group] for radioButton in radioButtonListInGroup: radioButton.enable()
Enables all radio buttons in the group.
813
def fuzzy_index_match(possiblities, label, **kwargs): possibilities = list(possiblities) if isinstance(label, basestring): return fuzzy_get(possibilities, label, **kwargs) if isinstance(label, int): return possibilities[label] if isinstance(label, list): return [fuzzy_get(possibilities, lbl) for lbl in label]
Find the closest matching column label, key, or integer indexed value Returns: type(label): sequence of immutable objects corresponding to best matches to each object in label if label is an int returns the object (value) in the list of possibilities at that index if label is a str returns the closest str match in possibilities >>> from collections import OrderedDict as odict >>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b') 'B' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2') '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1) '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1) '5' >>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4) 0
814
def delete_group_policy(self, group_name, policy_name): params = {'GroupName': group_name, 'PolicyName': policy_name} return self.get_response('DeleteGroupPolicy', params, verb='POST')
Deletes the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to delete.
815
def load_copy_of_template(self, name, *parameters): template, fields, header_fields = self._set_templates_fields_and_header_fields(name, parameters) copy_of_template = copy.deepcopy(template) copy_of_fields = copy.deepcopy(fields) self._init_new_message_stack(copy_of_template, copy_of_fields, header_fields)
Load a copy of message template saved with `Save template` when originally saved values need to be preserved from test to test. Optional parameters are default values for message header separated with colon. Examples: | Load Copy Of Template | MyMessage | header_field:value |
816
def expand(string, vars, local_vars={}): def exp(m): var = m.group(1) if var == : return return local_vars.get(var, vars.get(var, )) return re.sub(r, exp, string)
Expand a string containing $vars as Ninja would. Note: doesn't handle the full Ninja variable syntax, but it's enough to make configure.py's use of it work.
817
def __init(self): if self._portalId is None: from .administration import Administration portalSelf = Administration(url=self._securityHandler.org_url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port).portals.portalSelf self._portalId = portalSelf.id self._currentUser = portalSelf.user[]
loads the property data into the class
818
def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str=None): self.conflict_target = fields self.conflict_action = action self.index_predicate = index_predicate return self
Sets the action to take when conflicts arise when attempting to insert/create a new row. Arguments: fields: The fields the conflicts can occur in. action: The action to take when the conflict occurs. index_predicate: The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking conflicts)
819
def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=False, copy=True, mask=None): if is_datetime64tz_dtype(values): dtype = values.dtype values = getattr(values, "_values", values) else: values = com.values_from_object(values) dtype = values.dtype if mask is None: if isfinite: mask = _isfinite(values) else: mask = isna(values) if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values): values = getattr(values, "asi8", values) values = values.view(np.int64) dtype_ok = _na_ok_dtype(dtype) fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ) if skipna: if copy: values = values.copy() if dtype_ok: np.putmask(values, mask, fill_value) else: values, changed = maybe_upcast_putmask(values, mask, fill_value) elif copy: values = values.copy() dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.int64 elif is_float_dtype(dtype): dtype_max = np.float64 return values, mask, dtype, dtype_max, fill_value
Utility to get the values view, mask, and dtype; if necessary, copy and mask using the specified fill_value. copy=True will force the copy.
820
def distribute_equally(daily_data, divide=False): index = hourly_index(daily_data.index) hourly_data = daily_data.reindex(index) hourly_data = hourly_data.groupby(hourly_data.index.day).transform( lambda x: x.fillna(method='ffill', limit=23)) if divide: hourly_data /= 24 return hourly_data
Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values.
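A minimal pandas sketch of the same equal-distribution idea (the real function relies on a helper hourly_index not shown in this row; a daily DatetimeIndex and divide=True are assumed here):

import pandas as pd

daily = pd.Series([24.0, 48.0], index=pd.date_range("2000-01-01", periods=2, freq="D"))

# Reindex to hourly, forward-fill each day's value across its 24 hours,
# then divide by 24 so the daily sum is preserved (the divide=True case).
hourly_index = pd.date_range(daily.index[0], periods=len(daily) * 24, freq="h")
hourly = daily.reindex(hourly_index).ffill(limit=23) / 24
print(hourly.resample("D").sum())   # 24.0 and 48.0 are preserved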
821
def is_enhanced_rr_cap_valid(self): if not self.recv_open_msg: raise ValueError() err_cap_enabled = False local_caps = self.sent_open_msg.opt_param peer_caps = self.recv_open_msg.opt_param local_cap = [cap for cap in local_caps if cap.cap_code == BGP_CAP_ENHANCED_ROUTE_REFRESH] peer_cap = [cap for cap in peer_caps if cap.cap_code == BGP_CAP_ENHANCED_ROUTE_REFRESH] if local_cap and peer_cap: err_cap_enabled = True return err_cap_enabled
Checks if enhanced route refresh capability is enabled/valid. Checks sent and received `Open` messages to see if this session with the peer supports the enhanced route refresh capability.
822
def tachogram(data, sample_rate, signal=False, in_seconds=False, out_seconds=False): if signal is False: data_copy = data time_axis = numpy.array(data) if out_seconds is True and in_seconds is False: time_axis = time_axis / sample_rate else: data_copy = detect_r_peaks(data, sample_rate, time_units=out_seconds, volts=False, resolution=None, plot_result=False)[0] time_axis = data_copy tachogram_data = numpy.diff(time_axis) tachogram_time = time_axis[1:] return tachogram_data, tachogram_time
Function for generation of ECG Tachogram. ---------- Parameters ---------- data : list ECG signal or R peak list. When the input is a raw signal the input flag signal should be True. sample_rate : int Sampling frequency. signal : boolean If True, then the data argument contains the set of the ECG acquired samples. in_seconds : boolean If the R peaks list defined as the input argument "data" contains the sample numbers where the R peaks occur, then in_seconds needs to be False. out_seconds : boolean If True then each sample of the returned time axis is expressed in seconds. Returns ------- out : list, list List of tachogram samples. List of instants where each cardiac cycle ends.
823
def range(start, finish, step): value = start while value <= finish: yield value value += step
Like built-in :func:`~builtins.range`, but with float support
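A quick standalone illustration of the generator (renamed frange here to avoid shadowing the builtin); the usual caveat applies that float accumulation can make the endpoint slightly off:

def frange(start, finish, step):
    # Same logic as above: yields until the accumulated value passes `finish`.
    value = start
    while value <= finish:
        yield value
        value += step

print(list(frange(0.0, 1.0, 0.25)))   # [0.0, 0.25, 0.5, 0.75, 1.0]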
824
def find(collection, query=None, user=None, password=None, host=None, port=None, database=, authdb=None): conn = _connect(user, password, host, port, database, authdb) if not conn: return try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err
Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database>
825
def protorpc_to_endpoints_error(self, status, body): try: rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body) except (ValueError, messages.ValidationError): rpc_error = remote.RpcStatus() if rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR: error_class = _ERROR_NAME_MAP.get(rpc_error.error_name) if error_class: status, body = self.__write_error(error_class.http_status, rpc_error.error_message) return status, body
Convert a ProtoRPC error to the format expected by Google Endpoints. If the body does not contain an ProtoRPC message in state APPLICATION_ERROR the status and body will be returned unchanged. Args: status: HTTP status of the response from the backend body: JSON-encoded error in format expected by Endpoints frontend. Returns: Tuple of (http status, body)
826
def _append_unknown_char(self): if self.unknown_strategy == UNKNOWN_INCLUDE and \ self.unknown_char is not None: self._append_to_stack(self.unknown_char) self.unknown_char = None
Appends the unknown character, in case one was encountered.
827
def create_enterprise_session(url, token=None): gh_session = github3.enterprise_login(url=url, token=token) if gh_session is None: msg = raise RuntimeError(msg, url) return gh_session
Create a github3.py session for a GitHub Enterprise instance If token is not provided, will attempt to use the GITHUB_API_TOKEN environment variable if present.
828
def set_result(self, result, from_tree=False): if self._read_only: if not from_tree: LOGGER.warning("Tried to set address %s on a" " read-only context.", self.address) return with self._condition: if self._read_only: if not from_tree: LOGGER.warning("Tried to set address %s on a" " read-only context.", self.address) return if from_tree: if not self._result_set_in_context: self._result = result self._tree_has_set = True else: self._result = result self._result_set_in_context = True self._deleted = False self._condition.notify_all()
Set the address's value unless the future has been declared read only. Args: result (bytes): The value at an address. from_tree (bool): Whether the value is being set by a read from the merkle tree. Returns: None
829
def confine(x,low,high): y=x.copy() y[y < low] = low y[y > high] = high return y
Confine x to [low,high]. Values outside are set to low/high. See also restrict.
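The same clamping can be written with numpy's clip; a one-line sketch equivalent to the copy-and-assign version above:

import numpy as np

x = np.array([-2.0, 0.5, 3.7])
print(np.clip(x, 0.0, 1.0))   # [0.  0.5 1. ], same result as confine(x, 0.0, 1.0)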
830
def _autocomplete(client, url_part, input_text, session_token=None, offset=None, location=None, radius=None, language=None, types=None, components=None, strict_bounds=False): params = {"input": input_text} if session_token: params["sessiontoken"] = session_token if offset: params["offset"] = offset if location: params["location"] = convert.latlng(location) if radius: params["radius"] = radius if language: params["language"] = language if types: params["types"] = types if components: if len(components) != 1 or list(components.keys())[0] != "country": raise ValueError("Only country components are supported") params["components"] = convert.components(components) if strict_bounds: params["strictbounds"] = "true" url = "/maps/api/place/%sautocomplete/json" % url_part return client._request(url, params).get("predictions", [])
Internal handler for ``autocomplete`` and ``autocomplete_query``. See each method's docs for arg details.
831
def _append_integer(self, value, _file): _tabs = * self._tctr _text = value _labs = .format(tabs=_tabs, text=_text) _file.write(_labs)
Call this function to write integer contents. Keyword arguments: * value - int, content to be dumped * _file - FileIO, output file
832
def allow_rwe(self, name): assert name in PERMISSIONS.keys() os.chmod(self.file_path, PERMISSIONS[name][])
Allow all privileges for a particular name group (user, group, other).
833
def check_result(data, key=): if not isinstance(data, dict): return False if key: if key in data: return True return False if in data.keys(): return True if data.get(, -1) == 0 else False elif in data.keys(): return True if data.get(, -1) == 0 else False return False
Check the result of an API response. Ideally, this should be done by checking that the value of the ``resultCode`` attribute is 0, but there are endpoints that simply do not follow this rule. Args: data (dict): Response obtained from the API endpoint. key (string): Key to check for existence in the dict. Returns: bool: True if result was correct, False otherwise.
834
def prepare(self): accept_header = self.request.headers.get(, ) parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split()] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper self.start = time.time() self.connected = True self.lowstate = self._get_lowstate()
Run before get/posts etc. Pre-flight checks: - verify that we can speak back to them (compatible accept header)
835
def findViewType(self, viewTypeName): for viewType in self._viewTypes: if ( viewType.viewTypeName() == viewTypeName ): return viewType return None
Looks up the view type based on the input view type name. :param viewTypeName | <str>
836
def repr_as_line(self, additional_columns=None, only_show=None, sep=): additional_columns = additional_columns or [] if only_show is not None: columns = _uniquify(only_show) else: columns = _uniquify(self.DEFAULT_COLUMNS + additional_columns) to_display = [self._get_attrib(c, convert_to_str=True) for c in columns] return sep.join(to_display)
Returns a representation of the host as a single line, with columns joined by ``sep``. :param additional_columns: Columns to show in addition to defaults. :type additional_columns: ``list`` of ``str`` :param only_show: A specific list of columns to show. :type only_show: ``NoneType`` or ``list`` of ``str`` :param sep: The column separator to use. :type sep: ``str`` :rtype: ``str``
837
def delete_data_source(self, data_source): source_type = [k for k in data_source.keys()][0] complete_source = self.get_data_sources( source_id=data_source[source_type][]) folder_id = complete_source[source_type][0][] self.delete_folders(folder_ids=[folder_id]) return self.request(, data_source)
Delete data source with its name or ID. data_source = { 'imap': {'name': 'data-source-name'}} or data_source = { 'pop3': {'id': 'data-source-id'}}
838
def _enqueueIntoAllRemotes(self, msg: Any, signer: Signer) -> None: for rid in self.remotes.keys(): self._enqueue(msg, rid, signer)
Enqueue the specified message into all the remotes in the nodestack. :param msg: the message to enqueue
839
def send_command(self, command, arg=None): if arg is not None: command = % (command, arg) self._write(six.StringIO(command), len(command))
Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command.
840
def do_api_calls_update_cache(self): zones = self.parse_env_zones() data = self.group_instances(zones) self.cache.write_to_cache(data) self.inventory = data
Do API calls and save data in cache.
841
def create_table( self, table_name, obj=None, schema=None, database=None, external=False, force=False, format=, location=None, partition=None, like_parquet=None, ): if like_parquet is not None: raise NotImplementedError if obj is not None: if isinstance(obj, pd.DataFrame): from ibis.impala.pandas_interop import write_temp_dataframe writer, to_insert = write_temp_dataframe(self, obj) else: to_insert = obj ast = self._build_ast(to_insert, ImpalaDialect.make_context()) select = ast.queries[0] statement = ddl.CTAS( table_name, select, database=database, can_exist=force, format=format, external=external, partition=partition, path=location, ) elif schema is not None: statement = ddl.CreateTableWithSchema( table_name, schema, database=database, format=format, can_exist=force, external=external, path=location, partition=partition, ) else: raise com.IbisError() return self._execute(statement)
Create a new table in Impala using an Ibis table expression. This is currently designed for tables whose data is stored in HDFS (or eventually other filesystems). Parameters ---------- table_name : string obj : TableExpr or pandas.DataFrame, optional If passed, creates table from select statement results schema : ibis.Schema, optional Mutually exclusive with expr, creates an empty table with a particular schema database : string, default None (optional) force : boolean, default False Do not create table if table with indicated name already exists external : boolean, default False Create an external table; Impala will not delete the underlying data when the table is dropped format : {'parquet'} location : string, default None Specify the directory location where Impala reads and writes files for the table partition : list of strings Must pass a schema to use this. Cannot partition from an expression (create-table-as-select) like_parquet : string (HDFS path), optional Can specify in lieu of a schema Examples -------- >>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
842
def get_filter(self, header=None, origin=1): from .region_to_filter import as_region_filter if header is None: if not self.check_imagecoord(): raise RuntimeError("the region has non-image coordinate. header is required.") reg_in_imagecoord = self else: reg_in_imagecoord = self.as_imagecoord(header) region_filter = as_region_filter(reg_in_imagecoord, origin=origin) return region_filter
Get filter. Often, the regions files implicitly assume the lower-left corner of the image as a coordinate (1,1). However, the python convention is that the array index starts from 0. By default (``origin=1``), coordinates of the returned mpl artists have coordinate shifted by (1, 1). If you do not want this shift, use ``origin=0``. Parameters ---------- header : `astropy.io.fits.Header` FITS header origin : {0, 1} Pixel coordinate origin Returns ------- filter : TODO Filter object
843
def calc_list(request, id=None): base_url = _get_base_url(request) calc_data = logs.dbcmd(, request.GET, utils.get_valid_users(request), utils.get_acl_on(request), id) response_data = [] username = psutil.Process(os.getpid()).username() for (hc_id, owner, status, calculation_mode, is_running, desc, pid, parent_id, size_mb) in calc_data: url = urlparse.urljoin(base_url, % hc_id) abortable = False if is_running: try: if psutil.Process(pid).username() == username: abortable = True except psutil.NoSuchProcess: pass response_data.append( dict(id=hc_id, owner=owner, calculation_mode=calculation_mode, status=status, is_running=bool(is_running), description=desc, url=url, parent_id=parent_id, abortable=abortable, size_mb=size_mb)) if id is not None: [response_data] = response_data return HttpResponse(content=json.dumps(response_data), content_type=JSON)
Get a list of calculations and report their id, status, calculation_mode, is_running, description, and a url where more detailed information can be accessed. This is called several times by the Javascript. Responses are in JSON.
844
def set_content_disposition(self, disptype: str, quote_fields: bool=True, **params: Any) -> None: self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header( disptype, quote_fields=quote_fields, **params)
Sets ``Content-Disposition`` header.
845
def nice_pkg_name(name): logger.debug("%s", name) root, ext = os.path.splitext(name) logger.debug("root: %s, ext: %s", root, ext) if ext in ugly_ext: logger.debug("remove ext %s to get %s", ext, root) return root logger.debug("no change %s", name) return name
Strip an unwanted archive extension from a package name. :param name: the package file name :type name: str :return: the name without its extension if the extension is in ``ugly_ext``, otherwise the name unchanged :rtype: str
846
def render_with(template=None, json=False, jsonp=False): if jsonp: templates = { : dict_jsonp, : dict_jsonp, } elif json: templates = { : dict_jsonify, } else: templates = {} if isinstance(template, six.string_types): templates[] = template elif isinstance(template, dict): templates.update(template) elif template is None and (json or jsonp): pass else: raise ValueError("Expected string or dict for template") default_mimetype = if not in templates: templates[] = six.text_type default_mimetype = for mimetype in (, , ): if mimetype in templates: templates[] = templates[mimetype] default_mimetype = mimetype if isinstance(result, (Response, WerkzeugResponse, current_app.response_class)): return result if isinstance(result, tuple): resultset = result result = resultset[0] if len(resultset) > 1: status_code = resultset[1] else: status_code = None if len(resultset) > 2: headers = Headers(resultset[2]) else: headers = Headers() else: status_code = None headers = Headers() if len(templates) > 1: if in headers: vary_values = [item.strip() for item in headers[].split()] if not in vary_values: vary_values.append() headers[] = .join(vary_values) else: headers[] = use_mimetype = None if render and request: use_mimetype = _best_mimetype_match(template_mimetypes, request.accept_mimetypes, ) if use_mimetype is not None: if callable(templates[use_mimetype]): rendered = templates[use_mimetype](result) if isinstance(rendered, Response): if status_code is not None: rendered.status_code = status_code if headers is not None: rendered.headers.extend(headers) else: rendered = current_app.response_class( rendered, status=status_code, headers=headers, mimetype=default_mimetype if use_mimetype == else use_mimetype) else: rendered = current_app.response_class( render_template(templates[use_mimetype], **result), status=status_code or 200, headers=headers, mimetype=default_mimetype if use_mimetype == else use_mimetype) return rendered else: return result return decorated_function return inner
Decorator to render the wrapped function with the given template (or dictionary of mimetype keys to templates, where the template is a string name of a template file or a callable that returns a Response). The function's return value must be a dictionary and is passed to the template as parameters. Callable templates get a single parameter with the function's return value. Usage:: @app.route('/myview') @render_with('myview.html') def myview(): return {'data': 'value'} @app.route('/myview_with_json') @render_with('myview.html', json=True) def myview_no_json(): return {'data': 'value'} @app.route('/otherview') @render_with({ 'text/html': 'otherview.html', 'text/xml': 'otherview.xml'}) def otherview(): return {'data': 'value'} @app.route('/404view') @render_with('myview.html') def myview(): return {'error': '404 Not Found'}, 404 @app.route('/headerview') @render_with('myview.html') def myview(): return {'data': 'value'}, 200, {'X-Header': 'Header value'} When a mimetype is specified and the template is not a callable, the response is returned with the same mimetype. Callable templates must return Response objects to ensure the correct mimetype is set. If a dictionary of templates is provided and does not include a handler for ``*/*``, render_with will attempt to use the handler for (in order) ``text/html``, ``text/plain`` and the various JSON types, falling back to rendering the value into a unicode string. If the method is called outside a request context, the wrapped method's original return value is returned. This is meant to facilitate testing and should not be used to call the method from within another view handler as the presence of a request context will trigger template rendering. Rendering may also be suspended by calling the view handler with ``_render=False``. render_with provides JSON and JSONP handlers for the ``application/json``, ``text/json`` and ``text/x-json`` mimetypes if ``json`` or ``jsonp`` is True (default is False). :param template: Single template, or dictionary of MIME type to templates. If the template is a callable, it is called with the output of the wrapped function :param json: Helper to add a JSON handler (default is False) :param jsonp: Helper to add a JSONP handler (if True, also provides JSON, default is False)
847
def getStartNodes(fdefs,calls): s=[] for source in fdefs: for fn in fdefs[source]: inboundEdges=False for call in calls: if call.target==fn: inboundEdges=True if not inboundEdges: s.append(fn) return s
Return a list of nodes in fdefs that have no inbound edges
848
def _queue_into_buffer(transfersession): last_saved_by_conditions = [] filter_prefixes = Filter(transfersession.filter) server_fsic = json.loads(transfersession.server_fsic) client_fsic = json.loads(transfersession.client_fsic) if transfersession.push: fsics = _fsic_queuing_calc(client_fsic, server_fsic) else: fsics = _fsic_queuing_calc(server_fsic, client_fsic) if not fsics: return for instance, counter in six.iteritems(fsics): last_saved_by_conditions += ["(last_saved_instance = AND last_saved_counter > {1})".format(instance, counter)] if fsics: last_saved_by_conditions = [_join_with_logical_operator(last_saved_by_conditions, )] partition_conditions = [] for prefix in filter_prefixes: partition_conditions += ["partition LIKE ".format(prefix)] if filter_prefixes: partition_conditions = [_join_with_logical_operator(partition_conditions, )] fsic_and_partition_conditions = _join_with_logical_operator(last_saved_by_conditions + partition_conditions, ) where_condition = _join_with_logical_operator([fsic_and_partition_conditions, "profile = ".format(transfersession.sync_session.profile)], ) with connection.cursor() as cursor: queue_buffer = .format(outgoing_buffer=Buffer._meta.db_table, transfer_session_id=transfersession.id, condition=where_condition, store=Store._meta.db_table) cursor.execute(queue_buffer) queue_rmc_buffer = .format(outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table, transfer_session_id=transfersession.id, record_max_counter=RecordMaxCounter._meta.db_table, outgoing_buffer=Buffer._meta.db_table) cursor.execute(queue_rmc_buffer)
Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance.
849
def from_zeros(self, lmax, gm, r0, omega=None, errors=False, normalization=, csphase=1): if normalization.lower() not in (, , , ): raise ValueError( "The normalization must be , , , " "or . Input value was {:s}." .format(repr(normalization)) ) if csphase != 1 and csphase != -1: raise ValueError( "csphase must be either 1 or -1. Input value was {:s}." .format(repr(csphase)) ) if normalization.lower() == and lmax > 85: _warnings.warn("Calculations using unnormalized coefficients " "are stable only for degrees less than or equal " "to 85. lmax for the coefficients will be set to " "85. Input value was {:d}.".format(lmax), category=RuntimeWarning) lmax = 85 coeffs = _np.zeros((2, lmax + 1, lmax + 1)) coeffs[0, 0, 0] = 1.0 if errors is False: clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega, normalization=normalization.lower(), csphase=csphase) else: clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega, errors=_np.zeros((2, lmax + 1, lmax + 1)), normalization=normalization.lower(), csphase=csphase) return clm
Initialize the class with spherical harmonic coefficients set to zero from degree 1 to lmax, and set the degree 0 term to 1. Usage ----- x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors, normalization, csphase]) Returns ------- x : SHGravCoeffs class instance. Parameters ---------- lmax : int The maximum spherical harmonic degree l of the coefficients. gm : float The gravitational constant times the mass that is associated with the gravitational potential coefficients. r0 : float The reference radius of the spherical harmonic coefficients. omega : float, optional, default = None The angular rotation rate of the body. errors : bool, optional, default = False If True, initialize the attribute errors with zeros. normalization : str, optional, default = '4pi' '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized, Schmidt semi-normalized, or unnormalized coefficients, respectively. csphase : int, optional, default = 1 Condon-Shortley phase convention: 1 to exclude the phase factor, or -1 to include it.
850
def _ask_for_ledger_status(self, node_name: str, ledger_id): self.request_msg(LEDGER_STATUS, {f.LEDGER_ID.nm: ledger_id}, [node_name, ]) logger.info("{} asking {} for ledger status of ledger {}".format(self, node_name, ledger_id))
Ask other node for LedgerStatus
851
def reissue(self, order_id, csr, software_id, organization_handle, approver_email=None, signature_hash_algorithm=None, domain_validation_methods=None, hostnames=None, technical_handle=None): response = self.request(E.reissueSslCertRequest( E.id(order_id), E.csr(csr), E.softwareId(software_id), E.organizationHandle(organization_handle), OE(, approver_email), OE(, signature_hash_algorithm), OE(, domain_validation_methods, transform=_domain_validation_methods), OE(, hostnames, transform=_simple_array), OE(, technical_handle), )) return int(response.data.id)
Reissue an SSL certificate order
852
def order_by(self, **kwargs): if kwargs: col, order = kwargs.popitem() self.order_clause = "order by {col} {order} ".format( col=col, order=order) return self
Analog to SQL "ORDER BY". +kwargs+ should only contain one item. Examples: NO: repo.order_by() NO: repo.order_by(id="desc", name="asc") YES: repo.order_by(id="asc")
853
def move_file_to_file(old_path, new_path): try: os.rename(old_path, new_path) except: old_file = os.path.basename(old_path) target_directory, target_file = os.path.dirname( os.path.abspath(new_path)), os.path.basename(new_path) Document.move_file_to_directory( old_path, target_directory ) os.rename(os.path.join(target_directory, old_file), os.path.join(target_directory, target_file))
Moves file from old location to new one :param old_path: path of file to move :param new_path: new path
854
def parse_args(arguments=None, root=None, apply_config=False): if arguments is None: arguments = [] parser = create_parser() args = parser.parse_args(arguments) if apply_config: parser = apply_config_defaults(parser, args, root=root) args = parser.parse_args(arguments) if args.max_line_length <= 0: parser.error() if args.select: args.select = _split_comma_separated(args.select) if args.ignore: args.ignore = _split_comma_separated(args.ignore) elif not args.select and args.aggressive: args.select = [, ] else: args.ignore = _split_comma_separated(DEFAULT_IGNORE) if args.exclude: args.exclude = _split_comma_separated(args.exclude) else: args.exclude = [] return args
Parse the arguments from the CLI. If apply_config then we first look up and apply configs using apply_config_defaults.
855
def filter_db_names(paths: List[str]) -> List[str]: return [ db_path for db_path in paths if VERSION_RE.match(os.path.basename(db_path)) ]
Returns a filtered list of `paths`, where every name matches our format. Args: paths: A list of file names.
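A self-contained sketch of the same filtering with a hypothetical VERSION_RE (the real pattern is defined elsewhere in the module):

import os
import re
from typing import List

VERSION_RE = re.compile(r"^v\d+\.db$")   # hypothetical stand-in for the module's pattern

def filter_db_names(paths: List[str]) -> List[str]:
    return [p for p in paths if VERSION_RE.match(os.path.basename(p))]

print(filter_db_names(["/tmp/v1.db", "/tmp/notes.txt", "/tmp/v12.db"]))   # ['/tmp/v1.db', '/tmp/v12.db']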
856
def to_op(self): if not self._adds and not self._removes: return None changes = {} if self._adds: changes['adds'] = list(self._adds) if self._removes: changes['removes'] = list(self._removes) return changes
Extracts the modification operation from the set. :rtype: dict, None
857
def send_message(self, message, room_id, **kwargs): return SendMessage(settings=self.settings, **kwargs).call( message=message, room_id=room_id, **kwargs )
Send a message to a given room
858
def index_document(self, text, url): "Index the text of a document." title = text[:text.index('\n')].strip() docwords = words(text) docid = len(self.documents) self.documents.append(Document(title, url, len(docwords))) for word in docwords: if word not in self.stopwords: self.index[word][docid] += 1
Index the text of a document.
859
def get_file(self, fax_id, **kwargs): kwargs[] = True if kwargs.get(): return self.get_file_with_http_info(fax_id, **kwargs) else: (data) = self.get_file_with_http_info(fax_id, **kwargs) return data
get a file # noqa: E501 Get your fax archive file using it's id. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_file(fax_id, async=True) >>> result = thread.get() :param async bool :param str fax_id: (required) :param str format: can be 'pdf' or 'tiff' :return: file If the method is called asynchronously, returns the request thread.
860
def to_singular(word): if word[-1] != "s": return word elif word.endswith("ies"): return word[:-3] + "y" elif word.endswith("ses"): return word[:-2] else: return word[:-1]
Attempts to singularize a word.
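A few example inputs for the heuristic above (standalone copy for illustration; it is intentionally naive and does not handle irregular plurals):

def to_singular(word):
    if word[-1] != "s":
        return word
    elif word.endswith("ies"):
        return word[:-3] + "y"
    elif word.endswith("ses"):
        return word[:-2]
    else:
        return word[:-1]

for w in ("cats", "stories", "classes", "fish"):
    print(w, "->", to_singular(w))   # cat, story, class, fish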
861
def multihead_attention_2d(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, attention_type="local_attention_2d", query_shape=(8, 16), memory_flange=(8, 16), name=None): if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) with tf.variable_scope( name, default_name="multihead_attention_2d", values=[query_antecedent, memory_antecedent]): q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth) q = split_heads_2d(q, num_heads) k = split_heads_2d(k, num_heads) v = split_heads_2d(v, num_heads) key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 if attention_type == "local_attention_2d": x = local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) elif attention_type == "masked_local_attention_2d": assert attention_type == "masked_local_attention_2d" x = masked_local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) else: assert attention_type == "unmasked_local_attention_2d_tpu" x = dot_product_unmasked_attention_local_2d_tpu( q, k, v, None, max_relative_position=None, query_shape=query_shape) x = combine_heads_2d(x) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
862
def backward(self, speed=1): self.left_motor.backward(speed) self.right_motor.backward(speed)
Drive the robot backward by running both motors backward. :param float speed: Speed at which to drive the motors, as a value between 0 (stopped) and 1 (full speed). The default is 1.
863
def extract(self, variable_idx): branch = self._define_branch(variable_idx) label = self.profiles[variable_idx].replace("\n", "") self.label[variable_idx] = label self.data[variable_idx] = [[], []] with open(self.abspath) as fobj: for line in fobj.readlines()[ variable_idx+1+self._attributes[]:: self._attributes[]+1]: points = [] for point in line.split(): try: points.append(float(point)) except ValueError: pass self.data[variable_idx][1].append(np.array(points)) x_st = self.geometries[branch][0] x_no_st = [(x0+x1)/2 for x0, x1 in zip(x_st[:-1], x_st[1:])] if len(self.data[variable_idx][1][0]) == len(x_st): self.data[variable_idx][0] = np.array(x_st) else: self.data[variable_idx][0] = np.array(x_no_st)
Extract a specific variable
864
def loadTextureD3D11_Async(self, textureId, pD3D11Device): fn = self.function_table.loadTextureD3D11_Async ppD3D11Texture2D = c_void_p() result = fn(textureId, pD3D11Device, byref(ppD3D11Texture2D)) return result, ppD3D11Texture2D.value
Creates a D3D11 texture and loads data into it.
865
def send_loop(): while True: while not Message.objects.all(): logging.debug("sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP) time.sleep(EMPTY_QUEUE_SLEEP) send_all()
Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and sending messages if any are on queue.
866
def _get_I(self, a, b, size, plus_transpose=True): r_sum = np.zeros((3, 3), dtype=, order=) for r in self._rotations_cartesian: for i in range(3): for j in range(3): r_sum[i, j] += r[a, i] * r[b, j] if plus_transpose: r_sum += r_sum.T if (np.abs(r_sum) < 1e-10).all(): return None I_mat = np.zeros((3 * size, 3 * size), dtype=, order=) for i in range(size): I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum return I_mat
Return I matrix in Chaput's PRL paper. None is returned if I is zero matrix.
867
def byte_size(self, selection=False, virtual=False): bytes_per_row = 0 N = self.count(selection=selection) extra = 0 for column in list(self.get_column_names(virtual=virtual)): dtype = self.dtype(column) dtype_internal = self.dtype(column, internal=True) if isinstance(self.columns[column], ColumnString): extra += self.columns[column].nbytes else: bytes_per_row += dtype_internal.itemsize if np.ma.isMaskedArray(self.columns[column]): bytes_per_row += 1 return bytes_per_row * self.count(selection=selection) + extra
Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.
868
def CreateBitmap(self, artid, client, size): if artid in self.extra_icons: return wx.Bitmap(self.extra_icons[artid], wx.BITMAP_TYPE_ANY) else: return wx.ArtProvider.GetBitmap(artid, client, size)
Adds custom images to Artprovider
869
def read_version(): regex = re.compile() with open() as f: for line in f: match = regex.match(line) if match: return match.group()
Read version from the first line starting with digit
870
def request( self, method, url, data=None, headers=None, withhold_token=False, client_id=None, client_secret=None, **kwargs ): if not is_secure_transport(url): raise InsecureTransportError() if self.token and not withhold_token: log.debug( "Invoking %d protected resource request hooks.", len(self.compliance_hook["protected_request"]), ) for hook in self.compliance_hook["protected_request"]: log.debug("Invoking hook %s.", hook) url, headers, data = hook(url, headers, data) log.debug("Adding token %s to request.", self.token) try: url, headers, data = self._client.add_token( url, http_method=method, body=data, headers=headers ) except TokenExpiredError: if self.auto_refresh_url: log.debug( "Auto refresh is set, attempting to refresh at %s.", self.auto_refresh_url, ) client_id, ) auth = requests.auth.HTTPBasicAuth(client_id, client_secret) token = self.refresh_token( self.auto_refresh_url, auth=auth, **kwargs ) if self.token_updater: log.debug( "Updating token to %s using %s.", token, self.token_updater ) self.token_updater(token) url, headers, data = self._client.add_token( url, http_method=method, body=data, headers=headers ) else: raise TokenUpdated(token) else: raise log.debug("Requesting url %s using method %s.", url, method) log.debug("Supplying headers %s and data %s", headers, data) log.debug("Passing through key word arguments %s.", kwargs) return super(OAuth2Session, self).request( method, url, headers=headers, data=data, **kwargs )
Intercept all requests and add the OAuth 2 token if present.
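A sketch of how the auto-refresh path above is typically wired from the caller's side (client id, secret, URLs and token contents are placeholders):

from requests_oauthlib import OAuth2Session

def save_token(token):
    # Called by the session when a refresh happens, instead of raising TokenUpdated.
    print("new token:", token)

session = OAuth2Session(
    client_id="my-client-id",   # placeholder
    token={"access_token": "old", "refresh_token": "r",
           "token_type": "Bearer", "expires_in": -30},
    auto_refresh_url="https://provider.example/oauth/token",   # placeholder
    auto_refresh_kwargs={"client_id": "my-client-id", "client_secret": "my-secret"},
    token_updater=save_token,
)
# session.get("https://provider.example/api/resource")  # would refresh the token, then retry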
871
def parse(self, line): csv_list = line.split(",") date_time_message = csv_list.pop(0).split(" ", 2) otherinfo = dict() for item in csv_list: key_value_pair = item.split(":", 1) key = key_value_pair[0].strip() if len(key_value_pair) > 1: value = key_value_pair[1].strip() if not value: value = "-" else: value = "-" otherinfo[key] = value self.message = \ \ \ \ \ \ \ \ self.params = [ date_time_message[2], date_time_message[0], date_time_message[1], otherinfo.get("request", "-"), otherinfo.get("referrer", "-"), otherinfo.get("server", "-"), otherinfo.get("client", "-"), otherinfo.get("host", "-"), otherinfo.get("upstream", "-"), ] self.site = otherinfo.get("referrer", "-")
Parse a line of the Nginx error log
872
def get_tag(note_store, my_tags): tag_id = [] listtags = note_store.listTags() for my_tag in my_tags.split(): for tag in listtags: if tag.name.lower() == my_tag.lower().lstrip().rstrip(): tag_id.append(tag.guid) break return tag_id
get the tags from the user's Evernote account :param note_store Evernote Instance :param my_tags string :return: array of tag GUIDs to create
873
def __record_progress(self, next_step=None): config.SUSHI_BAR_CLIENT.report_progress( self.get_status(), self.get_status().value/Status.DONE.value) if next_step: now = time.time() config.SUSHI_BAR_CLIENT.report_stage(self.get_status(), now - self.timestamp) self.timestamp = now self.status = next_step with open(self.get_restore_path(Status.LAST), ) as handle, open(self.get_restore_path(), ) as step_handle: pickle.dump(self, handle) pickle.dump(self, step_handle)
__record_progress: save progress to respective restoration file Args: None Returns: None
874
def trace(self, data, callback=None): conn_id = self._find_connection(self.conn_string) if conn_id is not None: self.adapter.notify_event_nowait(self.conn_string, , data) if callback is not None: callback(conn_id is not None)
Queue data for tracing Args: data (bytearray, string): Unstructured data to trace to any connected client. callback (callable): An optional callback that will be called with a bool value of True when this data actually gets traced. If the client disconnects and the data is dropped instead, callback will be called with False.
875
def on_graphs_menu_close(self, update): logging.info("closing sensor menu, update=%s", update) if update: for sensor, visible_sensors in \ self.graphs_menu.active_sensors.items(): self.graphs[sensor].set_visible_graphs(visible_sensors) if sensor in self.visible_graphs and not any(visible_sensors): del self.visible_graphs[sensor] elif not any(visible_sensors): pass else: self.visible_graphs[sensor] = self.graphs[sensor] self.show_graphs() self.original_widget = self.main_window_w
Return to main screen and update sensors that are active in the view
876
def pointerEvent(self, x, y, buttonmask=0): self.transport.write(pack("!BBHH", 5, buttonmask, x, y))
Indicates either pointer movement or a pointer button press or release. The pointer is now at (x-position, y-position), and the current state of buttons 1 to 8 are represented by bits 0 to 7 of button-mask respectively, 0 meaning up, 1 meaning down (pressed).
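The wire format is a fixed 6-byte message; a quick sketch of the bytes produced for a left-button press at (100, 200):

from struct import pack

# message-type 5 (PointerEvent), button-mask with bit 0 set, then x and y as big-endian u16.
msg = pack("!BBHH", 5, 0b00000001, 100, 200)
print(msg.hex())   # 0501006400c8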
877
def indent(rows, hasHeader=False, headerChar=, delim=, justify=, separateRows=False, prefix=, postfix=, wrapfunc=lambda x: x): justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()] output = cStringIO.StringIO() if separateRows: print >> output, rowSeparator for physicalRows in logicalRows: for row in physicalRows: print >> output, prefix \ + delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)]) + postfix if separateRows or hasHeader: print >> output, rowSeparator hasHeader = False return output.getvalue()
Indents a table by column. - rows: A sequence of sequences of items, one sequence per row. - hasHeader: True if the first row consists of the columns' names. - headerChar: Character to be used for the row separator line (if hasHeader==True or separateRows==True). - delim: The column delimiter. - justify: Determines how are data justified in their column. Valid values are 'left','right' and 'center'. - separateRows: True if rows are to be separated by a line of 'headerChar's. - prefix: A string prepended to each printed row. - postfix: A string appended to each printed row. - wrapfunc: A function f(text) for wrapping text; each element in the table is first wrapped by this function.
878
def register_on_medium_changed(self, callback): event_type = library.VBoxEventType.on_medium_changed return self.event_source.register_callback(callback, event_type)
Set the callback function to consume on medium changed events. Callback receives a IMediumChangedEvent object. Returns the callback_id
879
def display_results(repo_name, contributors, api_len): print("\n") print("All Contributors:") seen = [] for user in sorted(contributors, key=_sort_by_name): if user.get("name"): key = user["name"] else: key = user["user_name"] if key not in seen: seen.append(key) if key != user["user_name"]: print("%s (%s)" % (user["name"], user["user_name"])) else: print(user["user_name"]) print("") print("Repo: %s" % repo_name) print("GitHub Contributors: %s" % api_len) print("All Contributors: %s 👏" % len(seen))
Fancy display.
880
def get_clean_factor_and_forward_returns(factor, prices, groupby=None, binning_by_group=False, quantiles=5, bins=None, periods=(1, 5, 10), filter_zscore=20, groupby_labels=None, max_loss=0.35, zero_aware=False, cumulative_returns=True): forward_returns = compute_forward_returns(factor, prices, periods, filter_zscore, cumulative_returns) factor_data = get_clean_factor(factor, forward_returns, groupby=groupby, groupby_labels=groupby_labels, quantiles=quantiles, bins=bins, binning_by_group=binning_by_group, max_loss=max_loss, zero_aware=zero_aware) return factor_data
Formats the factor data, pricing data, and group mappings into a DataFrame that contains aligned MultiIndex indices of timestamp and asset. The returned data will be formatted to be suitable for Alphalens functions. It is safe to skip a call to this function and still make use of Alphalens functionalities as long as the factor data conforms to the format returned from get_clean_factor_and_forward_returns and documented here Parameters ---------- factor : pd.Series - MultiIndex A MultiIndex Series indexed by timestamp (level 0) and asset (level 1), containing the values for a single alpha factor. :: ----------------------------------- date | asset | ----------------------------------- | AAPL | 0.5 ----------------------- | BA | -1.1 ----------------------- 2014-01-01 | CMG | 1.7 ----------------------- | DAL | -0.1 ----------------------- | LULU | 2.7 ----------------------- prices : pd.DataFrame A wide form Pandas DataFrame indexed by timestamp with assets in the columns. Pricing data must span the factor analysis time period plus an additional buffer window that is greater than the maximum number of expected periods in the forward returns calculations. It is important to pass the correct pricing data in depending on what time of period your signal was generated so to avoid lookahead bias, or delayed calculations. 'Prices' must contain at least an entry for each timestamp/asset combination in 'factor'. This entry should reflect the buy price for the assets and usually it is the next available price after the factor is computed but it can also be a later price if the factor is meant to be traded later (e.g. if the factor is computed at market open but traded 1 hour after market open the price information should be 1 hour after market open). 'Prices' must also contain entries for timestamps following each timestamp/asset combination in 'factor', as many more timestamps as the maximum value in 'periods'. The asset price after 'period' timestamps will be considered the sell price for that asset when computing 'period' forward returns. :: ---------------------------------------------------- | AAPL | BA | CMG | DAL | LULU | ---------------------------------------------------- Date | | | | | | ---------------------------------------------------- 2014-01-01 |605.12| 24.58| 11.72| 54.43 | 37.14 | ---------------------------------------------------- 2014-01-02 |604.35| 22.23| 12.21| 52.78 | 33.63 | ---------------------------------------------------- 2014-01-03 |607.94| 21.68| 14.36| 53.94 | 29.37 | ---------------------------------------------------- groupby : pd.Series - MultiIndex or dict Either A MultiIndex Series indexed by date and asset, containing the period wise group codes for each asset, or a dict of asset to group mappings. If a dict is passed, it is assumed that group mappings are unchanged for the entire time period of the passed factor data. binning_by_group : bool If True, compute quantile buckets separately for each group. This is useful when the factor values range vary considerably across gorups so that it is wise to make the binning group relative. You should probably enable this if the factor is intended to be analyzed for a group neutral portfolio quantiles : int or sequence[float] Number of equal-sized quantile buckets to use in factor bucketing. Alternately sequence of quantiles, allowing non-equal-sized buckets e.g. [0, .10, .5, .90, 1.] 
or [.05, .5, .95] Only one of 'quantiles' or 'bins' can be not-None bins : int or sequence[float] Number of equal-width (valuewise) bins to use in factor bucketing. Alternately sequence of bin edges allowing for non-uniform bin width e.g. [-4, -2, -0.5, 0, 10] Chooses the buckets to be evenly spaced according to the values themselves. Useful when the factor contains discrete values. Only one of 'quantiles' or 'bins' can be not-None periods : sequence[int] periods to compute forward returns on. filter_zscore : int or float, optional Sets forward returns greater than X standard deviations from the the mean to nan. Set it to 'None' to avoid filtering. Caution: this outlier filtering incorporates lookahead bias. groupby_labels : dict A dictionary keyed by group code with values corresponding to the display name for each group. max_loss : float, optional Maximum percentage (0.00 to 1.00) of factor data dropping allowed, computed comparing the number of items in the input factor index and the number of items in the output DataFrame index. Factor data can be partially dropped due to being flawed itself (e.g. NaNs), not having provided enough price data to compute forward returns for all factor values, or because it is not possible to perform binning. Set max_loss=0 to avoid Exceptions suppression. zero_aware : bool, optional If True, compute quantile buckets separately for positive and negative signal values. This is useful if your signal is centered and zero is the separation between long and short signals, respectively. cumulative_returns : bool, optional If True, forward returns columns will contain cumulative returns. Setting this to False is useful if you want to analyze how predictive a factor is for a single forward day. Returns ------- merged_data : pd.DataFrame - MultiIndex A MultiIndex Series indexed by date (level 0) and asset (level 1), containing the values for a single alpha factor, forward returns for each period, the factor quantile/bin that factor value belongs to, and (optionally) the group the asset belongs to. - forward returns column names follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc) - 'date' index freq property (merged_data.index.levels[0].freq) will be set to a trading calendar (pandas DateOffset) inferred from the input data (see infer_trading_calendar for more details). This is currently used only in cumulative returns computation :: ------------------------------------------------------------------- | | 1D | 5D | 10D |factor|group|factor_quantile ------------------------------------------------------------------- date | asset | | | | | | ------------------------------------------------------------------- | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3 -------------------------------------------------------- | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5 -------------------------------------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1 -------------------------------------------------------- | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5 -------------------------------------------------------- | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2 --------------------------------------------------------
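A compact sketch of the input shapes described above (assumes alphalens and pandas are installed; tickers, dates and factor values are made up):

import numpy as np
import pandas as pd
from alphalens.utils import get_clean_factor_and_forward_returns

dates = pd.date_range("2014-01-01", periods=30, freq="B")
assets = ["AAPL", "BA", "CMG"]

# factor: MultiIndex (date, asset) Series, as in the table above.
index = pd.MultiIndex.from_product([dates, assets], names=["date", "asset"])
factor = pd.Series(np.random.randn(len(index)), index=index)

# prices: wide DataFrame (dates x assets) spanning the factor dates plus a buffer
# large enough for the longest forward-return period.
price_dates = pd.date_range("2014-01-01", periods=45, freq="B")
prices = pd.DataFrame(100 + np.random.randn(len(price_dates), len(assets)).cumsum(axis=0),
                      index=price_dates, columns=assets)

factor_data = get_clean_factor_and_forward_returns(factor, prices, quantiles=3, periods=(1, 5))
print(factor_data.head())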
881
def get_selection(self, name="default"): name = _normalize_selection_name(name) selection_history = self.selection_histories[name] index = self.selection_history_indices[name] if index == -1: return None else: return selection_history[index]
Get the current selection object (mostly for internal use atm).
882
def wait_to_end(self, pids=[]): actual_pids = self._get_pids(pids) return self.wait_for(pids=actual_pids, status_list=process_result_statuses)
wait_to_end(self, pids=[]) Wait for processes to finish :Parameters: * *pids* (`list`) -- list of processes to wait to finish
883
def get_plugins(**kwargs):
    plugins = []
    plugin_paths = []

    # NOTE: the string literals in this function were stripped from the
    # original snippet; the config section/option names, directory names and
    # file name below are placeholder restorations, not confirmed originals.
    base_plugin_dir = config.get('plugin', 'default_directory')
    plugin_xsd_path = config.get('plugin', 'plugin_xsd_path')

    base_plugin_dir_contents = os.listdir(base_plugin_dir)
    for directory in base_plugin_dir_contents:
        # Skip hidden entries (the second comparison literal was stripped;
        # 'xml' is an assumption).
        if directory[0] == '.' or directory == 'xml':
            continue
        path = os.path.join(base_plugin_dir, directory)
        if os.path.isdir(path):
            plugin_paths.append(path)

    xmlschema_doc = etree.parse(plugin_xsd_path)
    xmlschema = etree.XMLSchema(xmlschema_doc)

    for plugin_dir in plugin_paths:
        # Sub-directory name was stripped; 'trunk' is an assumption.
        full_plugin_path = os.path.join(plugin_dir, 'trunk')
        dir_contents = os.listdir(full_plugin_path)
        for file_name in dir_contents:
            file_path = os.path.join(full_plugin_path, file_name)
            # Expected file name was stripped; 'plugin.xml' is an assumption.
            # (A redundant, unused open() of the same file was removed here.)
            if file_name == 'plugin.xml':
                try:
                    y = open(file_path, 'r')
                    xml_tree = etree.parse(y)
                    xmlschema.assertValid(xml_tree)
                    plugins.append(etree.tostring(xml_tree))
                except Exception as e:
                    log.critical("Schema %s did not validate! (error was %s)" % (file_name, e))
                break
            else:
                log.warning("No xml plugin details found for %s. Ignoring", plugin_dir)

    return plugins
Get all available plugins
884
def supportsType(self, type_uri): return ( (type_uri in self.type_uris) or (type_uri == OPENID_2_0_TYPE and self.isOPIdentifier()) )
Does this endpoint support this type? I consider C{/server} endpoints to implicitly support C{/signon}.
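The implicit-support rule is easier to see outside the endpoint class. This stand-alone sketch restates it with the published OpenID 2.0 type URIs; the helper function here is illustrative and not part of the library.

# Stand-alone restatement of the rule above: an OP Identifier endpoint (one
# advertising the ".../server" type) implicitly supports ".../signon".
OPENID_2_0_TYPE = 'http://specs.openid.net/auth/2.0/signon'
OPENID_IDP_2_0_TYPE = 'http://specs.openid.net/auth/2.0/server'

def supports(type_uris, type_uri):
    is_op_identifier = OPENID_IDP_2_0_TYPE in type_uris
    return type_uri in type_uris or (
        type_uri == OPENID_2_0_TYPE and is_op_identifier)

print(supports([OPENID_IDP_2_0_TYPE], OPENID_2_0_TYPE))   # True: implicit signon support
print(supports([OPENID_2_0_TYPE], OPENID_IDP_2_0_TYPE))   # False: signon does not imply server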
885
def _generate_token(self):
    session = self.get_session()
    url = self.__base_url()
    try:
        auth = requests.auth.HTTPBasicAuth(self._user, self._password)
        req = session.get(url, auth=auth, timeout=self._timeout_default)
        if not req.ok:
            # Fall back to digest authentication if basic auth is rejected.
            auth = requests.auth.HTTPDigestAuth(self._user, self._password)
            req = session.get(url, auth=auth, timeout=self._timeout_default)
        req.raise_for_status()
    except requests.RequestException as error:
        _LOGGER.error(error)
        raise CommError()

    # NOTE: the string literals below were stripped from this snippet; the
    # substrings checked for and the log format are restored by assumption.
    result = req.text.lower()
    if 'invalid' in result or 'error' in result:
        _LOGGER.error('Login failed, result from camera: %s',
                      req.text.strip().replace('\r\n', ''))
        raise LoginError()
    return auth
Create authentication to use with requests.
886
def run(self, eps=1e-4, kill=True, max_steps=50, verbose=False):
    old_distance = np.finfo(np.float64).max
    new_distance = np.finfo(np.float64).max

    # NOTE: the progress and assertion message strings were stripped from
    # this snippet; the texts below are reconstructed approximations.
    if verbose:
        print('Starting hierarchical clustering with %i components.' % len(self.g.components))
    converged = False
    for step in range(1, max_steps + 1):
        self._cleanup(kill, verbose)
        self._regroup()
        self._refit()
        new_distance = self._distance()
        assert new_distance >= 0, 'Found negative distance %s' % new_distance
        if verbose:
            print('Distance in step %i: %g' % (step, new_distance))
        if new_distance == old_distance:
            converged = True
            if verbose:
                print('Exact minimum found after %i steps' % step)
            break
        rel_change = (old_distance - new_distance) / old_distance
        assert not (rel_change < -1e-13), 'Distance increased'
        if rel_change < eps and not converged and step > 0:
            converged = True
            if verbose and new_distance != old_distance:
                print('Close enough to local minimum after %i steps' % step)
            break
        old_distance = new_distance
    self._cleanup(kill, verbose)
    if verbose:
        print('%i components remain.' % len(self.g.components))
    if converged:
        return step
r"""Perform the clustering on the input components updating the initial guess. The result is available in the member ``self.g``. Return the number of iterations at convergence, or None. :param eps: If relative change of distance between current and last step falls below ``eps``, declare convergence: .. math:: 0 < \frac{d^t - d^{t-1}}{d^t} < \varepsilon :param kill: If a component is assigned zero weight (no input components), it is removed. :param max_steps: Perform a maximum number of update steps. :param verbose: Output information on progress of algorithm.
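The stopping rule can be illustrated on its own. This toy loop, independent of any clustering implementation, applies the same relative-change test used in the method above to a decreasing sequence of distances.

# Toy illustration of the convergence test: iterate until the relative
# decrease of the distance falls below eps, or the distance stops changing.
import numpy as np

def converged_after(distances, eps=1e-4):
    old = np.finfo(np.float64).max
    for step, new in enumerate(distances, start=1):
        if new == old:
            return step                      # exact minimum reached
        rel_change = (old - new) / old
        if 0 <= rel_change < eps:
            return step                      # close enough to a local minimum
        old = new
    return None                              # no convergence within the sequence

print(converged_after([10.0, 5.0, 4.9, 4.8999]))   # -> 4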
887
def _padding_to_conv_op_padding(padding): if not isinstance(padding, tuple): raise ValueError("padding should be a tuple.") if all(p == SAME for p in padding): return SAME else: return VALID
Whether to use SAME or VALID for the underlying convolution op. Args: padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from `_fill_and_verify_padding`. Returns: One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the underlying convolution op. Raises: ValueError: If padding is not a tuple.
888
def renew_item(self, item, expiration):
    conn = self._conn()
    self._run_expiration(conn)
    expiration += time.time()
    # NOTE: the Lua script source passed to register_script() was stripped
    # from this snippet and could not be recovered; as shown, this call is
    # incomplete.
    script = conn.register_script()
    result = script(keys=[self._key_expiration(), self._key_workers()],
                    args=[item, expiration, self._get_worker_id(conn)])
    if result == -1:
        raise LostLease(item)
    return
Update the expiration time for ``item``. The item will remain checked out for ``expiration`` seconds beyond the current time. This queue instance must have already checked out ``item``, and this method can fail if ``item`` is already overdue.
889
def _run_all(cmd, log_lvl=None, log_msg=None, exitcode=0):
    # NOTE: the body of this helper was garbled in extraction; the code below
    # is a reconstruction based on the docstring and the surviving fragments
    # ('exitcode', cmd.run_all, 'retcode', 'stdout', 'stderr'), assuming the
    # usual Salt __salt__['cmd.run_all'] wrapper pattern.
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] == exitcode:
        # Per the docstring: return stdout when there is any, otherwise True.
        return res['stdout'] if res['stdout'] else True
    if log_lvl is not None and log_msg is not None:
        # log_msg may contain {0}, which is filled with stderr.
        getattr(log, log_lvl)(log_msg.format(res['stderr']))
    return False
Simple wrapper around cmd.run_all log_msg can contain {0} for stderr :return: True or stdout, False if retcode wasn't exitcode
890
def _normalize_dir(string_): return os.path.normpath(salt.utils.stringutils.to_unicode(string_))
Normalize the directory to make comparison possible
891
def do_group(self):
    group_id = self.config.group
    # NOTE: the dict key was stripped from this snippet; 'machine_id' is an
    # assumed restoration.
    systems = {'machine_id': generate_machine_id()}
    self.group_systems(group_id, systems)
Do grouping on register
892
def personsAtHome(self, home=None):
    if not home:
        home = self.default_home
    home_data = self.homeByName(home)
    atHome = []
    # NOTE: the dictionary keys were stripped from this snippet; 'persons'
    # and 'pseudo' are restored by assumption from the Netatmo data layout.
    for p in home_data['persons']:
        # Only consider known persons (those with a pseudo set).
        if 'pseudo' in p:
            if not p["out_of_sight"]:
                atHome.append(p['pseudo'])
    return atHome
Return the list of known persons who are currently at home
893
def safe_copyfile(src, dest):
    # Copy into a temporary file in the destination directory, then rename it
    # into place so readers never observe a partially written dest.
    fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest))
    # The stripped file modes are restored as binary read/write.
    shutil.copyfileobj(open(src, 'rb'), os.fdopen(fd, 'wb'))
    shutil.copystat(src, tmpname)
    os.rename(tmpname, dest)
safely copy src to dest using a temporary intermediate and then renaming to dest
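A self-contained demonstration of the copy-then-rename pattern. The helper is redefined locally only so the snippet runs on its own, and the directory and file names are made up. The design point is that os.rename is atomic when source and destination live on the same filesystem, which is why the temporary file is created in dest's own directory.

# Demonstration of the pattern above; file names are illustrative.
import os
import shutil
import tempfile

def safe_copyfile(src, dest):
    fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest))
    shutil.copyfileobj(open(src, 'rb'), os.fdopen(fd, 'wb'))
    shutil.copystat(src, tmpname)
    os.rename(tmpname, dest)

with tempfile.TemporaryDirectory() as workdir:
    src = os.path.join(workdir, 'source.txt')
    dest = os.path.join(workdir, 'copy.txt')
    with open(src, 'w') as fh:
        fh.write('hello')
    safe_copyfile(src, dest)
    with open(dest) as fh:
        print(fh.read())   # -> hello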
894
def get_resource_query_session(self, proxy): if not self.supports_resource_query(): raise errors.Unimplemented() return sessions.ResourceQuerySession(proxy=proxy, runtime=self._runtime)
Gets a resource query session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceQuerySession) - ``a ResourceQuerySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_query()`` is ``true``.*
895
def _raise_from_invalid_response(error): response = error.response error_message = str(error) message = u"{method} {url}: {error}".format( method=response.request.method, url=response.request.url, error=error_message ) raise exceptions.from_http_status(response.status_code, message, response=response)
Re-wrap and raise an ``InvalidResponse`` exception. :type error: :exc:`google.resumable_media.InvalidResponse` :param error: A caught exception from the ``google-resumable-media`` library. :raises: :class:`~google.cloud.exceptions.GoogleCloudError` corresponding to the failed status code
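What the re-wrapping produces can be seen with the status-code mapping alone. The sketch below calls google.api_core.exceptions.from_http_status directly; the snippet above goes through google.cloud.exceptions, which in current releases builds on the same mapping (treat that equivalence as an assumption).

# from_http_status maps an HTTP status code to the matching
# GoogleCloudError subclass; the URL in the message is made up.
from google.api_core import exceptions

err = exceptions.from_http_status(404, 'GET https://example.test/object: not found')
print(type(err).__name__)   # NotFound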
896
def logsumexp(arr, axis=0): arr = np.rollaxis(arr, axis) vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out
Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107
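The reason for subtracting the maximum before exponentiating is numerical: a direct evaluation overflows for large inputs, while the shifted form does not. A quick check:

# Both expressions compute log(sum(exp(a))), but only the max-shifted form
# survives large inputs.
import numpy as np

a = np.array([1000.0, 1000.0])
print(np.log(np.sum(np.exp(a))))                  # inf, with an overflow warning
vmax = a.max()
print(vmax + np.log(np.sum(np.exp(a - vmax))))    # 1000.6931... = 1000 + log(2)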
897
def from_dict(data, ctx):
    data = data.copy()

    # NOTE: the field names were stripped from this snippet; 'volume' and
    # 'priceRange' are restored from the GuaranteedStopLossOrderLevelRestriction
    # definition and should be treated as assumptions.
    if data.get('volume') is not None:
        data['volume'] = ctx.convert_decimal_number(
            data.get('volume')
        )
    if data.get('priceRange') is not None:
        data['priceRange'] = ctx.convert_decimal_number(
            data.get('priceRange')
        )

    return GuaranteedStopLossOrderLevelRestriction(**data)
Instantiate a new GuaranteedStopLossOrderLevelRestriction from a dict (generally from loading a JSON response). The data used to instantiate the GuaranteedStopLossOrderLevelRestriction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
898
def get_stories(self, story_type='', limit=30):
    # story_type defaults to '' (top stories / homepage), per the docstring.
    if limit is None or limit < 1 or limit > 30:
        limit = 30

    stories_found = 0
    while stories_found < limit:
        soup = get_soup(page=story_type)
        all_rows = self._get_zipped_rows(soup)
        stories = self._build_story(all_rows)

        for story in stories:
            yield story
            stories_found += 1
            if stories_found == limit:
                return
Yields a list of stories from the passed page of HN. 'story_type' can be: \t'' = top stories (homepage) (default) \t'news2' = page 2 of top stories \t'newest' = most recent stories \t'best' = best stories 'limit' is the number of stories required from the given page. Defaults to 30. Cannot be more than 30.
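A hypothetical usage sketch. Only get_stories itself appears above; the import path and class name below are assumptions about the surrounding scraper package and should be adjusted to wherever the class actually lives.

# Hypothetical usage -- 'from hn import HN' is an assumed import path for the
# class that defines get_stories().
from hn import HN

hn = HN()
for story in hn.get_stories(story_type='newest', limit=5):
    print(story)   # one story object per iteration, newest first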
899
def copy_and_sum_families(family_source, family_target): for every in family_source: if every not in family_target: family_target[every] = family_source[every] else: family_target[every] += family_source[every]
Iterates through the source family and copies its entries to the target family; when a key already exists in both families, the values are added.
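A quick example of the merge behaviour, runnable right after the definition above (assuming dict-like families with numeric values):

# Shared key 'b' is summed; keys unique to the source are copied over.
source = {'a': 2, 'b': 3}
target = {'b': 10, 'c': 1}
copy_and_sum_families(source, target)
print(target)   # {'b': 13, 'c': 1, 'a': 2}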