Columns: code (string, lengths 75 to 104k characters) and docstring (string, lengths 1 to 46.9k characters).
def autocorrelation(x, lag): """ Calculates the autocorrelation of the specified lag, according to the formula [1] .. math:: \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu) where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its mean. `l` denotes the lag. .. rubric:: References [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation :param x: the time series to calculate the feature of :type x: numpy.ndarray :param lag: the lag :type lag: int :return: the value of this feature :return type: float """ # This is important: If a series is passed, the product below is calculated # based on the index, which corresponds to squaring the series. if type(x) is pd.Series: x = x.values if len(x) < lag: return np.nan # Slice the relevant subseries based on the lag y1 = x[:(len(x)-lag)] y2 = x[lag:] # Subtract the mean of the whole series x x_mean = np.mean(x) # The result is sometimes referred to as "covariation" sum_product = np.sum((y1 - x_mean) * (y2 - x_mean)) # Return the normalized unbiased covariance v = np.var(x) if np.isclose(v, 0): return np.NaN else: return sum_product / ((len(x) - lag) * v)
Calculates the autocorrelation of the specified lag, according to the formula [1] .. math:: \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu) where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its mean. `l` denotes the lag. .. rubric:: References [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation :param x: the time series to calculate the feature of :type x: numpy.ndarray :param lag: the lag :type lag: int :return: the value of this feature :return type: float
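A minimal usage sketch for autocorrelation above; the sample series is hypothetical and numpy is assumed to be imported as np, exactly as the function body already requires.

import numpy as np

x = np.array([1.0, 2.0, 1.0, 2.0, 1.0, 2.0])  # alternating series
# Neighbouring samples always move in opposite directions around the mean,
# so the lag-1 autocorrelation comes out as -1.
print(autocorrelation(x, lag=1))  # -1.0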
def paste_clipboard(self, event): """ Send the clipboard content as user input to the CPU. """ log.critical("paste clipboard") clipboard = self.root.clipboard_get() for line in clipboard.splitlines(): log.critical("paste line: %s", repr(line)) self.add_user_input(line + "\r")
Send the clipboard content as user input to the CPU.
def _method_error_handler(self, response: Dict[str, Any]): """Handle status codes in the 400-499 range and set the exception on the corresponding task. Parameters: (response): - the response data as a Python dict Return: (bool): - strictly speaking, returns True when there is no error """ exp = response.get('MESSAGE') code = response.get("CODE") ID = exp.get("ID") raise abort(code, ID=ID, message=exp.get('MESSAGE'))
Handle status codes in the 400-499 range and set the exception on the corresponding task. Parameters: (response): - the response data as a Python dict Return: (bool): - strictly speaking, returns True when there is no error
def _must_be_deleted(local_path, r_st): """Return True if the remote correspondent of local_path has to be deleted, i.e. if it doesn't exist locally or if it has a different type from the remote one.""" # if the file doesn't exist if not os.path.lexists(local_path): return True # or if the file type is different l_st = os.lstat(local_path) if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode): return True return False
Return True if the remote correspondent of local_path has to be deleted, i.e. if it doesn't exist locally or if it has a different type from the remote one.
def receive(self): """Receive TCP response, looping to get whole thing or timeout.""" try: buffer = self._socket.recv(BUFFER_SIZE) except socket.timeout as error: # Something is wrong, assume it's offline temporarily _LOGGER.error("Error receiving: %s", error) # self._socket.close() return "" # Read until a newline or timeout buffering = True response = '' while buffering: if '\n' in buffer.decode("utf8"): response = buffer.decode("utf8").split('\n')[0] buffering = False else: try: more = self._socket.recv(BUFFER_SIZE) except socket.timeout: more = None if not more: buffering = False response = buffer.decode("utf8") else: buffer += more return response
Receive TCP response, looping to get whole thing or timeout.
def binarize_signal(signal, treshold="auto", cut="higher"): """ Binarize a channel based on a continuous channel. Parameters ---------- signal = array or list The signal channel. treshold = float The treshold value by which to select the events. If "auto", takes the value between the max and the min. cut = str "higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower". Returns ---------- list binary_signal Example ---------- >>> import neurokit as nk >>> binary_signal = nk.binarize_signal(signal, treshold=4) Authors ---------- - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ Dependencies ---------- None """ if treshold == "auto": treshold = (np.max(np.array(signal)) - np.min(np.array(signal)))/2 signal = list(signal) binary_signal = [] for i in range(len(signal)): if cut == "higher": if signal[i] > treshold: binary_signal.append(1) else: binary_signal.append(0) else: if signal[i] < treshold: binary_signal.append(1) else: binary_signal.append(0) return(binary_signal)
Binarize a channel based on a continuous channel. Parameters ---------- signal = array or list The signal channel. treshold = float The treshold value by which to select the events. If "auto", takes the value between the max and the min. cut = str "higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower". Returns ---------- list binary_signal Example ---------- >>> import neurokit as nk >>> binary_signal = nk.binarize_signal(signal, treshold=4) Authors ---------- - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ Dependencies ---------- None
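A short usage sketch for binarize_signal above, on a hypothetical photosensor-style trace; numpy is assumed to be imported as np, as in the function body.

import numpy as np

signal = [0, 0, 5, 5, 0, 5]
# With treshold="auto" the cutoff here is (max - min) / 2 = 2.5, and cut="higher"
# marks every sample above that cutoff as an event.
print(binarize_signal(signal, treshold="auto", cut="higher"))  # [0, 0, 1, 1, 0, 1]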
def _tracked_model_diff(self): """Returns changes made to model instance. Returns None if no changes were made. """ initial_state = self._tracked_model_initial_state current_state = serializer.dump_model(self) if current_state == initial_state: return None change_log = {} for field in initial_state: old_value = initial_state[field][Field.VALUE] new_value = current_state[field][Field.VALUE] if old_value == new_value: continue field_data = initial_state.copy()[field] del field_data[Field.VALUE] field_data[Field.OLD] = old_value field_data[Field.NEW] = new_value change_log[field] = field_data return change_log or None
Returns changes made to model instance. Returns None if no changes were made.
def wp_status(self): '''show status of wp download''' try: print("Have %u of %u waypoints" % (self.wploader.count()+len(self.wp_received), self.wploader.expected_count)) except Exception: print("Have %u waypoints" % (self.wploader.count()+len(self.wp_received)))
show status of wp download
def is_rfc2822(instance: str): """Validates RFC2822 format""" if not isinstance(instance, str): return True return email.utils.parsedate(instance) is not None
Validates RFC2822 format
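Example calls for is_rfc2822 above; the date strings are made up, and only email.utils from the standard library is needed.

import email.utils  # used inside is_rfc2822

print(is_rfc2822("Mon, 20 Nov 1995 19:12:08 -0500"))  # True: parsedate accepts it
print(is_rfc2822("definitely not a date"))            # False: parsedate returns None
print(is_rfc2822(12345))                              # True: non-strings are not checked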
def stream(self): """ :class:`Stream` object for playing """ # Add song to queue self._connection.request( 'addSongsToQueue', {'songIDsArtistIDs': [{'artistID': self.artist.id, 'source': 'user', 'songID': self.id, 'songQueueSongID': 1}], 'songQueueID': self._connection.session.queue}, self._connection.header('addSongsToQueue', 'jsqueue')) stream_info = self._connection.request( 'getStreamKeyFromSongIDEx', {'songID': self.id, 'country': self._connection.session.country, 'prefetch': False, 'mobile': False}, self._connection.header('getStreamKeyFromSongIDEx', 'jsqueue'))[1] return Stream(stream_info['ip'], stream_info['streamKey'], self._connection)
:class:`Stream` object for playing
def format(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items. """ self._create_drawables(tokensource) self._draw_line_numbers() im = Image.new( 'RGB', self._get_image_size(self.maxcharno, self.maxlineno), self.background_color ) self._paint_line_number_bg(im) draw = ImageDraw.Draw(im) # Highlight if self.hl_lines: x = self.image_pad + self.line_number_width - self.line_number_pad + 1 recth = self._get_line_height() rectw = im.size[0] - x for linenumber in self.hl_lines: y = self._get_line_y(linenumber - 1) draw.rectangle([(x, y), (x + rectw, y + recth)], fill=self.hl_color) for pos, value, font, kw in self.drawables: draw.text(pos, value, font=font, **kw) im.save(outfile, self.image_format.upper())
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items.
def has_frames(self, destination): """ Whether specified queue has any frames. @param destination: The queue name (destination). @type destination: C{str} @return: Whether there are any frames in the specified queue. @rtype: C{bool} """ session = meta.Session() sel = select([model.frames_table.c.message_id]).where( model.frames_table.c.destination == destination) result = session.execute(sel) first = result.fetchone() return first is not None
Whether specified queue has any frames. @param destination: The queue name (destination). @type destination: C{str} @return: Whether there are any frames in the specified queue. @rtype: C{bool}
def enableGroup(self): """Enables all radio buttons in the group.""" radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group] for radioButton in radioButtonListInGroup: radioButton.enable()
Enables all radio buttons in the group.
def fuzzy_index_match(possiblities, label, **kwargs): """Find the closest matching column label, key, or integer indexed value Returns: type(label): sequence of immutable objects corresponding to best matches to each object in label if label is an int returns the object (value) in the list of possibilities at that index if label is a str returns the closest str match in possibilities >>> from collections import OrderedDict as odict >>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b') 'B' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2') '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1) '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1) '5' >>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4) 0 """ possibilities = list(possiblities) if isinstance(label, basestring): return fuzzy_get(possibilities, label, **kwargs) if isinstance(label, int): return possibilities[label] if isinstance(label, list): return [fuzzy_get(possibilities, lbl) for lbl in label]
Find the closest matching column label, key, or integer indexed value Returns: type(label): sequence of immutable objects corresponding to best matches to each object in label if label is an int returns the object (value) in the list of possibilities at that index if label is a str returns the closest str match in possibilities >>> from collections import OrderedDict as odict >>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b') 'B' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2') '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1) '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1) '5' >>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4) 0
def delete_group_policy(self, group_name, policy_name): """ Deletes the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to delete. """ params = {'GroupName' : group_name, 'PolicyName' : policy_name} return self.get_response('DeleteGroupPolicy', params, verb='POST')
Deletes the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to delete.
def load_copy_of_template(self, name, *parameters): """Load a copy of message template saved with `Save template` when originally saved values need to be preserved from test to test. Optional parameters are default values for message header separated with colon. Examples: | Load Copy Of Template | MyMessage | header_field:value | """ template, fields, header_fields = self._set_templates_fields_and_header_fields(name, parameters) copy_of_template = copy.deepcopy(template) copy_of_fields = copy.deepcopy(fields) self._init_new_message_stack(copy_of_template, copy_of_fields, header_fields)
Load a copy of message template saved with `Save template` when originally saved values need to be preserved from test to test. Optional parameters are default values for message header separated with colon. Examples: | Load Copy Of Template | MyMessage | header_field:value |
def expand(string, vars, local_vars={}): """Expand a string containing $vars as Ninja would. Note: doesn't handle the full Ninja variable syntax, but it's enough to make configure.py's use of it work. """ def exp(m): var = m.group(1) if var == '$': return '$' return local_vars.get(var, vars.get(var, '')) return re.sub(r'\$(\$|\w*)', exp, string)
Expand a string containing $vars as Ninja would. Note: doesn't handle the full Ninja variable syntax, but it's enough to make configure.py's use of it work.
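A usage sketch for expand above, with hypothetical variable maps; only the standard-library re module is needed.

import re

build_vars = {"cflags": "-O2", "builddir": "out"}
print(expand("$builddir/obj $cflags", build_vars))                # out/obj -O2
print(expand("$builddir/$name", build_vars, {"name": "main.o"}))  # out/main.o (local vars win)
print(expand("cost is $$5", build_vars))                          # cost is $5 ($$ escapes $)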
def __init(self): """loads the property data into the class""" if self._portalId is None: from .administration import Administration portalSelf = Administration(url=self._securityHandler.org_url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port).portals.portalSelf self._portalId = portalSelf.id self._currentUser = portalSelf.user['username']
loads the property data into the class
def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str=None): """Sets the action to take when conflicts arise when attempting to insert/create a new row. Arguments: fields: The fields the conflicts can occur in. action: The action to take when the conflict occurs. index_predicate: The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking conflicts) """ self.conflict_target = fields self.conflict_action = action self.index_predicate = index_predicate return self
Sets the action to take when conflicts arise when attempting to insert/create a new row. Arguments: fields: The fields the conflicts can occur in. action: The action to take when the conflict occurs. index_predicate: The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking conflicts)
def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=False, copy=True, mask=None): """ utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy """ if is_datetime64tz_dtype(values): # com.values_from_object returns M8[ns] dtype instead of tz-aware, # so this case must be handled separately from the rest dtype = values.dtype values = getattr(values, "_values", values) else: values = com.values_from_object(values) dtype = values.dtype if mask is None: if isfinite: mask = _isfinite(values) else: mask = isna(values) if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = getattr(values, "asi8", values) values = values.view(np.int64) dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ) if skipna: if copy: values = values.copy() if dtype_ok: np.putmask(values, mask, fill_value) # promote if needed else: values, changed = maybe_upcast_putmask(values, mask, fill_value) elif copy: values = values.copy() # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.int64 elif is_float_dtype(dtype): dtype_max = np.float64 return values, mask, dtype, dtype_max, fill_value
utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy
def distribute_equally(daily_data, divide=False): """Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values. """ index = hourly_index(daily_data.index) hourly_data = daily_data.reindex(index) hourly_data = hourly_data.groupby(hourly_data.index.day).transform( lambda x: x.fillna(method='ffill', limit=23)) if divide: hourly_data /= 24 return hourly_data
Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values.
def is_enhanced_rr_cap_valid(self): """Checks whether the enhanced route refresh capability is enabled/valid. Checks sent and received `Open` messages to see if this session with the peer supports the enhanced route refresh capability. """ if not self.recv_open_msg: raise ValueError('Did not yet receive peers open message.') err_cap_enabled = False local_caps = self.sent_open_msg.opt_param peer_caps = self.recv_open_msg.opt_param local_cap = [cap for cap in local_caps if cap.cap_code == BGP_CAP_ENHANCED_ROUTE_REFRESH] peer_cap = [cap for cap in peer_caps if cap.cap_code == BGP_CAP_ENHANCED_ROUTE_REFRESH] # Both local and peer should advertise ERR capability for it to be # enabled. if local_cap and peer_cap: err_cap_enabled = True return err_cap_enabled
Checks whether the enhanced route refresh capability is enabled/valid. Checks sent and received `Open` messages to see if this session with the peer supports the enhanced route refresh capability.
def tachogram(data, sample_rate, signal=False, in_seconds=False, out_seconds=False): """ Function for generation of ECG Tachogram. ---------- Parameters ---------- data : list ECG signal or R peak list. When the input is a raw signal the input flag signal should be True. sample_rate : int Sampling frequency. signal : boolean If True, then the data argument contains the set of the ECG acquired samples. in_seconds : boolean If the R peaks list defined as the input argument "data" contains the sample numbers where the R peaks occur, then in_seconds needs to be False. out_seconds : boolean If True then each sample of the returned time axis is expressed in seconds. Returns ------- out : list, list List of tachogram samples. List of instants where each cardiac cycle ends. """ if signal is False: # data is a list of R peaks position. data_copy = data time_axis = numpy.array(data)#.cumsum() if out_seconds is True and in_seconds is False: time_axis = time_axis / sample_rate else: # data is a ECG signal. # Detection of R peaks. data_copy = detect_r_peaks(data, sample_rate, time_units=out_seconds, volts=False, resolution=None, plot_result=False)[0] time_axis = data_copy # Generation of Tachogram. tachogram_data = numpy.diff(time_axis) tachogram_time = time_axis[1:] return tachogram_data, tachogram_time
Function for generation of ECG Tachogram. ---------- Parameters ---------- data : list ECG signal or R peak list. When the input is a raw signal the input flag signal should be True. sample_rate : int Sampling frequency. signal : boolean If True, then the data argument contains the set of the ECG acquired samples. in_seconds : boolean If the R peaks list defined as the input argument "data" contains the sample numbers where the R peaks occur, then in_seconds needs to be False. out_seconds : boolean If True then each sample of the returned time axis is expressed in seconds. Returns ------- out : list, list List of tachogram samples. List of instants where each cardiac cycle ends.
def range(start, finish, step): """Like built-in :func:`~builtins.range`, but with float support""" value = start while value <= finish: yield value value += step
Like built-in :func:`~builtins.range`, but with float support
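A quick usage sketch for the float-aware range above; note it shadows the builtin of the same name and, unlike the builtin, includes the end point when the step lands on it exactly.

print(list(range(0.0, 1.0, 0.25)))  # [0.0, 0.25, 0.5, 0.75, 1.0]
# Steps that are exact binary fractions (0.25, 0.5, ...) avoid float-accumulation
# surprises at the inclusive endpoint.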
def find(collection, query=None, user=None, password=None, host=None, port=None, database='admin', authdb=None): ''' Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database> ''' conn = _connect(user, password, host, port, database, authdb) if not conn: return 'Failed to connect to mongo database' try: query = _to_dict(query) except Exception as err: return err try: log.info("Searching for %r in %s", query, collection) mdb = pymongo.database.Database(conn, database) col = getattr(mdb, collection) ret = col.find(query) return list(ret) except pymongo.errors.PyMongoError as err: log.error("Searching objects failed with error: %s", err) return err
Find an object or list of objects in a collection CLI Example: .. code-block:: bash salt '*' mongodb.find mycollection '[{"foo": "FOO", "bar": "BAR"}]' <user> <password> <host> <port> <database>
def protorpc_to_endpoints_error(self, status, body): """Convert a ProtoRPC error to the format expected by Google Endpoints. If the body does not contain a ProtoRPC message in state APPLICATION_ERROR, the status and body will be returned unchanged. Args: status: HTTP status of the response from the backend body: JSON-encoded error in format expected by Endpoints frontend. Returns: Tuple of (http status, body) """ try: rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body) except (ValueError, messages.ValidationError): rpc_error = remote.RpcStatus() if rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR: # Try to map to HTTP error code. error_class = _ERROR_NAME_MAP.get(rpc_error.error_name) if error_class: status, body = self.__write_error(error_class.http_status, rpc_error.error_message) return status, body
Convert a ProtoRPC error to the format expected by Google Endpoints. If the body does not contain a ProtoRPC message in state APPLICATION_ERROR, the status and body will be returned unchanged. Args: status: HTTP status of the response from the backend body: JSON-encoded error in format expected by Endpoints frontend. Returns: Tuple of (http status, body)
def _append_unknown_char(self): ''' Appends the unknown character, in case one was encountered. ''' if self.unknown_strategy == UNKNOWN_INCLUDE and \ self.unknown_char is not None: self._append_to_stack(self.unknown_char) self.unknown_char = None
Appends the unknown character, in case one was encountered.
def create_enterprise_session(url, token=None): """ Create a github3.py session for a GitHub Enterprise instance If token is not provided, will attempt to use the GITHUB_API_TOKEN environment variable if present. """ gh_session = github3.enterprise_login(url=url, token=token) if gh_session is None: msg = 'Unable to connect to GitHub Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return gh_session
Create a github3.py session for a GitHub Enterprise instance If token is not provided, will attempt to use the GITHUB_API_TOKEN environment variable if present.
def set_result(self, result, from_tree=False): """Set the address's value unless the future has been declared read only. Args: result (bytes): The value at an address. from_tree (bool): Whether the value is being set by a read from the merkle tree. Returns: None """ if self._read_only: if not from_tree: LOGGER.warning("Tried to set address %s on a" " read-only context.", self.address) return with self._condition: if self._read_only: if not from_tree: LOGGER.warning("Tried to set address %s on a" " read-only context.", self.address) return if from_tree: # If the result has not been set in the context, overwrite the # value with the value from the merkle tree. Otherwise, do # nothing. if not self._result_set_in_context: self._result = result self._tree_has_set = True else: self._result = result self._result_set_in_context = True self._deleted = False self._condition.notify_all()
Set the address's value unless the future has been declared read only. Args: result (bytes): The value at an address. from_tree (bool): Whether the value is being set by a read from the merkle tree. Returns: None
def confine(x,low,high): '''Confine x to [low,high]. Values outside are set to low/high. See also restrict.''' y=x.copy() y[y < low] = low y[y > high] = high return y
Confine x to [low,high]. Values outside are set to low/high. See also restrict.
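Usage sketch for confine above, assuming a numpy array as input (the function relies on .copy() and boolean indexing, so plain lists will not work).

import numpy as np

x = np.array([-2.0, 0.3, 0.9, 4.0])
print(confine(x, 0.0, 1.0))  # [0.  0.3 0.9 1. ]
print(x)                     # the original array is left untouched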
def _autocomplete(client, url_part, input_text, session_token=None, offset=None, location=None, radius=None, language=None, types=None, components=None, strict_bounds=False): """ Internal handler for ``autocomplete`` and ``autocomplete_query``. See each method's docs for arg details. """ params = {"input": input_text} if session_token: params["sessiontoken"] = session_token if offset: params["offset"] = offset if location: params["location"] = convert.latlng(location) if radius: params["radius"] = radius if language: params["language"] = language if types: params["types"] = types if components: if len(components) != 1 or list(components.keys())[0] != "country": raise ValueError("Only country components are supported") params["components"] = convert.components(components) if strict_bounds: params["strictbounds"] = "true" url = "/maps/api/place/%sautocomplete/json" % url_part return client._request(url, params).get("predictions", [])
Internal handler for ``autocomplete`` and ``autocomplete_query``. See each method's docs for arg details.
def _append_integer(self, value, _file): """Call this function to write integer contents. Keyword arguments: * value - int, content to be dumped * _file - FileIO, output file """ _tabs = '\t' * self._tctr _text = value _labs = '{tabs}<integer>{text}</integer>\n'.format(tabs=_tabs, text=_text) _file.write(_labs)
Call this function to write integer contents. Keyword arguments: * value - int, content to be dumped * _file - FileIO, output file
def allow_rwe(self, name): """Allow all privileges for a particular name group (user, group, other).""" assert name in PERMISSIONS.keys() os.chmod(self.file_path, PERMISSIONS[name]['all'])
Allow all privileges for a particular name group (user, group, other).
def check_result(data, key=''): """Check the result of an API response. Ideally, this should be done by checking that the value of the ``resultCode`` attribute is 0, but there are endpoints that simply do not follow this rule. Args: data (dict): Response obtained from the API endpoint. key (string): Key to check for existence in the dict. Returns: bool: True if result was correct, False otherwise. """ if not isinstance(data, dict): return False if key: if key in data: return True return False if 'resultCode' in data.keys(): # OpenBus return True if data.get('resultCode', -1) == 0 else False elif 'code' in data.keys(): # Parking return True if data.get('code', -1) == 0 else False return False
Check the result of an API response. Ideally, this should be done by checking that the value of the ``resultCode`` attribute is 0, but there are endpoints that simply do not follow this rule. Args: data (dict): Response obtained from the API endpoint. key (string): Key to check for existence in the dict. Returns: bool: True if result was correct, False otherwise.
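Example calls for check_result above, using hypothetical payloads shaped like the two endpoint families the docstring mentions.

print(check_result({"resultCode": 0}))           # True  (OpenBus-style payload)
print(check_result({"code": -1}))                # False (Parking-style payload)
print(check_result({"items": []}, key="items"))  # True  (plain key-existence check)
print(check_result("not a dict"))                # False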
def prepare(self): ''' Run before get/posts etc. Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate()
Run before get/posts etc. Pre-flight checks: - verify that we can speak back to them (compatible accept header)
def findViewType(self, viewTypeName): """ Looks up the view type based on the given view type name. :param viewTypeName | <str> """ for viewType in self._viewTypes: if ( viewType.viewTypeName() == viewTypeName ): return viewType return None
Looks up the view type based on the given view type name. :param viewTypeName | <str>
def repr_as_line(self, additional_columns=None, only_show=None, sep=','): """ Returns a representation of the host as a single line, with columns joined by ``sep``. :param additional_columns: Columns to show in addition to defaults. :type additional_columns: ``list`` of ``str`` :param only_show: A specific list of columns to show. :type only_show: ``NoneType`` or ``list`` of ``str`` :param sep: The column separator to use. :type sep: ``str`` :rtype: ``str`` """ additional_columns = additional_columns or [] if only_show is not None: columns = _uniquify(only_show) else: columns = _uniquify(self.DEFAULT_COLUMNS + additional_columns) to_display = [self._get_attrib(c, convert_to_str=True) for c in columns] return sep.join(to_display)
Returns a representation of the host as a single line, with columns joined by ``sep``. :param additional_columns: Columns to show in addition to defaults. :type additional_columns: ``list`` of ``str`` :param only_show: A specific list of columns to show. :type only_show: ``NoneType`` or ``list`` of ``str`` :param sep: The column separator to use. :type sep: ``str`` :rtype: ``str``
def delete_data_source(self, data_source): """ Delete data source with its name or ID. data_source = { 'imap': {'name': 'data-source-name'}} or data_source = { 'pop3': {'id': 'data-source-id'}} """ source_type = [k for k in data_source.keys()][0] complete_source = self.get_data_sources( source_id=data_source[source_type]['id']) folder_id = complete_source[source_type][0]['l'] self.delete_folders(folder_ids=[folder_id]) return self.request('DeleteDataSource', data_source)
Delete data source with its name or ID. data_source = { 'imap': {'name': 'data-source-name'}} or data_source = { 'pop3': {'id': 'data-source-id'}}
def _enqueueIntoAllRemotes(self, msg: Any, signer: Signer) -> None: """ Enqueue the specified message into all the remotes in the nodestack. :param msg: the message to enqueue """ for rid in self.remotes.keys(): self._enqueue(msg, rid, signer)
Enqueue the specified message into all the remotes in the nodestack. :param msg: the message to enqueue
def send_command(self, command, arg=None): """Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command. """ if arg is not None: command = '%s:%s' % (command, arg) self._write(six.StringIO(command), len(command))
Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command.
def do_api_calls_update_cache(self): ''' Do API calls and save data in cache. ''' zones = self.parse_env_zones() data = self.group_instances(zones) self.cache.write_to_cache(data) self.inventory = data
Do API calls and save data in cache.
def create_table( self, table_name, obj=None, schema=None, database=None, external=False, force=False, # HDFS options format='parquet', location=None, partition=None, like_parquet=None, ): """ Create a new table in Impala using an Ibis table expression. This is currently designed for tables whose data is stored in HDFS (or eventually other filesystems). Parameters ---------- table_name : string obj : TableExpr or pandas.DataFrame, optional If passed, creates table from select statement results schema : ibis.Schema, optional Mutually exclusive with expr, creates an empty table with a particular schema database : string, default None (optional) force : boolean, default False Do not create table if table with indicated name already exists external : boolean, default False Create an external table; Impala will not delete the underlying data when the table is dropped format : {'parquet'} location : string, default None Specify the directory location where Impala reads and writes files for the table partition : list of strings Must pass a schema to use this. Cannot partition from an expression (create-table-as-select) like_parquet : string (HDFS path), optional Can specify in lieu of a schema Examples -------- >>> con.create_table('new_table_name', table_expr) # doctest: +SKIP """ if like_parquet is not None: raise NotImplementedError if obj is not None: if isinstance(obj, pd.DataFrame): from ibis.impala.pandas_interop import write_temp_dataframe writer, to_insert = write_temp_dataframe(self, obj) else: to_insert = obj ast = self._build_ast(to_insert, ImpalaDialect.make_context()) select = ast.queries[0] statement = ddl.CTAS( table_name, select, database=database, can_exist=force, format=format, external=external, partition=partition, path=location, ) elif schema is not None: statement = ddl.CreateTableWithSchema( table_name, schema, database=database, format=format, can_exist=force, external=external, path=location, partition=partition, ) else: raise com.IbisError('Must pass expr or schema') return self._execute(statement)
Create a new table in Impala using an Ibis table expression. This is currently designed for tables whose data is stored in HDFS (or eventually other filesystems). Parameters ---------- table_name : string obj : TableExpr or pandas.DataFrame, optional If passed, creates table from select statement results schema : ibis.Schema, optional Mutually exclusive with expr, creates an empty table with a particular schema database : string, default None (optional) force : boolean, default False Do not create table if table with indicated name already exists external : boolean, default False Create an external table; Impala will not delete the underlying data when the table is dropped format : {'parquet'} location : string, default None Specify the directory location where Impala reads and writes files for the table partition : list of strings Must pass a schema to use this. Cannot partition from an expression (create-table-as-select) like_parquet : string (HDFS path), optional Can specify in lieu of a schema Examples -------- >>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
def get_filter(self, header=None, origin=1): """Get filter. Often, the regions files implicitly assume the lower-left corner of the image as a coordinate (1,1). However, the python convention is that the array index starts from 0. By default (``origin=1``), coordinates of the returned mpl artists are shifted by (1, 1). If you do not want this shift, use ``origin=0``. Parameters ---------- header : `astropy.io.fits.Header` FITS header origin : {0, 1} Pixel coordinate origin Returns ------- filter : TODO Filter object """ from .region_to_filter import as_region_filter if header is None: if not self.check_imagecoord(): raise RuntimeError("the region has non-image coordinate. header is required.") reg_in_imagecoord = self else: reg_in_imagecoord = self.as_imagecoord(header) region_filter = as_region_filter(reg_in_imagecoord, origin=origin) return region_filter
Get filter. Often, the regions files implicitly assume the lower-left corner of the image as a coordinate (1,1). However, the python convention is that the array index starts from 0. By default (``origin=1``), coordinates of the returned mpl artists are shifted by (1, 1). If you do not want this shift, use ``origin=0``. Parameters ---------- header : `astropy.io.fits.Header` FITS header origin : {0, 1} Pixel coordinate origin Returns ------- filter : TODO Filter object
def calc_list(request, id=None): # view associated to the endpoints /v1/calc/list and /v1/calc/:id/status """ Get a list of calculations and report their id, status, calculation_mode, is_running, description, and a url where more detailed information can be accessed. This is called several times by the Javascript. Responses are in JSON. """ base_url = _get_base_url(request) calc_data = logs.dbcmd('get_calcs', request.GET, utils.get_valid_users(request), utils.get_acl_on(request), id) response_data = [] username = psutil.Process(os.getpid()).username() for (hc_id, owner, status, calculation_mode, is_running, desc, pid, parent_id, size_mb) in calc_data: url = urlparse.urljoin(base_url, 'v1/calc/%d' % hc_id) abortable = False if is_running: try: if psutil.Process(pid).username() == username: abortable = True except psutil.NoSuchProcess: pass response_data.append( dict(id=hc_id, owner=owner, calculation_mode=calculation_mode, status=status, is_running=bool(is_running), description=desc, url=url, parent_id=parent_id, abortable=abortable, size_mb=size_mb)) # if id is specified the related dictionary is returned instead the list if id is not None: [response_data] = response_data return HttpResponse(content=json.dumps(response_data), content_type=JSON)
Get a list of calculations and report their id, status, calculation_mode, is_running, description, and a url where more detailed information can be accessed. This is called several times by the Javascript. Responses are in JSON.
def set_content_disposition(self, disptype: str, quote_fields: bool=True, **params: Any) -> None: """Sets ``Content-Disposition`` header.""" self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header( disptype, quote_fields=quote_fields, **params)
Sets ``Content-Disposition`` header.
def nice_pkg_name(name): """todo: Docstring for nice_pkg_name :param name: arg description :type name: type description :return: :rtype: """ logger.debug("%s", name) root, ext = os.path.splitext(name) logger.debug("root :'%s', ext: '%s'", root, ext) if ext in ugly_ext: logger.debug("remove ext %s to get %s", ext, root) return root logger.debug("no change %s", name) return name
todo: Docstring for nice_pkg_name :param name: arg description :type name: type description :return: :rtype:
def render_with(template=None, json=False, jsonp=False): """ Decorator to render the wrapped function with the given template (or dictionary of mimetype keys to templates, where the template is a string name of a template file or a callable that returns a Response). The function's return value must be a dictionary and is passed to the template as parameters. Callable templates get a single parameter with the function's return value. Usage:: @app.route('/myview') @render_with('myview.html') def myview(): return {'data': 'value'} @app.route('/myview_with_json') @render_with('myview.html', json=True) def myview_no_json(): return {'data': 'value'} @app.route('/otherview') @render_with({ 'text/html': 'otherview.html', 'text/xml': 'otherview.xml'}) def otherview(): return {'data': 'value'} @app.route('/404view') @render_with('myview.html') def myview(): return {'error': '404 Not Found'}, 404 @app.route('/headerview') @render_with('myview.html') def myview(): return {'data': 'value'}, 200, {'X-Header': 'Header value'} When a mimetype is specified and the template is not a callable, the response is returned with the same mimetype. Callable templates must return Response objects to ensure the correct mimetype is set. If a dictionary of templates is provided and does not include a handler for ``*/*``, render_with will attempt to use the handler for (in order) ``text/html``, ``text/plain`` and the various JSON types, falling back to rendering the value into a unicode string. If the method is called outside a request context, the wrapped method's original return value is returned. This is meant to facilitate testing and should not be used to call the method from within another view handler as the presence of a request context will trigger template rendering. Rendering may also be suspended by calling the view handler with ``_render=False``. render_with provides JSON and JSONP handlers for the ``application/json``, ``text/json`` and ``text/x-json`` mimetypes if ``json`` or ``jsonp`` is True (default is False). :param template: Single template, or dictionary of MIME type to templates. If the template is a callable, it is called with the output of the wrapped function :param json: Helper to add a JSON handler (default is False) :param jsonp: Helper to add a JSONP handler (if True, also provides JSON, default is False) """ if jsonp: templates = { 'application/json': dict_jsonp, 'application/javascript': dict_jsonp, } elif json: templates = { 'application/json': dict_jsonify, } else: templates = {} if isinstance(template, six.string_types): templates['text/html'] = template elif isinstance(template, dict): templates.update(template) elif template is None and (json or jsonp): pass else: # pragma: no cover raise ValueError("Expected string or dict for template") default_mimetype = '*/*' if '*/*' not in templates: templates['*/*'] = six.text_type default_mimetype = 'text/plain' for mimetype in ('text/html', 'text/plain', 'application/json'): if mimetype in templates: templates['*/*'] = templates[mimetype] default_mimetype = mimetype # Remember which mimetype's handler is serving for */* break template_mimetypes = list(templates.keys()) template_mimetypes.remove('*/*') # */* messes up matching, so supply it only as last resort def inner(f): @wraps(f) def decorated_function(*args, **kwargs): # Check if we need to bypass rendering render = kwargs.pop('_render', True) # Get the result result = f(*args, **kwargs) # Is the result a Response object? 
Don't attempt rendering if isinstance(result, (Response, WerkzeugResponse, current_app.response_class)): return result # Did the result include status code and headers? if isinstance(result, tuple): resultset = result result = resultset[0] if len(resultset) > 1: status_code = resultset[1] else: status_code = None if len(resultset) > 2: headers = Headers(resultset[2]) else: headers = Headers() else: status_code = None headers = Headers() if len(templates) > 1: # If we have more than one template handler if 'Vary' in headers: vary_values = [item.strip() for item in headers['Vary'].split(',')] if 'Accept' not in vary_values: vary_values.append('Accept') headers['Vary'] = ', '.join(vary_values) else: headers['Vary'] = 'Accept' # Find a matching mimetype between Accept headers and available templates use_mimetype = None if render and request: # We do not use request.accept_mimetypes.best_match because it turns out to # be buggy: it returns the least match instead of the best match. # use_mimetype = request.accept_mimetypes.best_match(template_mimetypes, '*/*') use_mimetype = _best_mimetype_match(template_mimetypes, request.accept_mimetypes, '*/*') # Now render the result with the template for the mimetype if use_mimetype is not None: if callable(templates[use_mimetype]): rendered = templates[use_mimetype](result) if isinstance(rendered, Response): if status_code is not None: rendered.status_code = status_code if headers is not None: rendered.headers.extend(headers) else: rendered = current_app.response_class( rendered, status=status_code, headers=headers, mimetype=default_mimetype if use_mimetype == '*/*' else use_mimetype) else: # Not a callable mimetype. Render as a jinja2 template rendered = current_app.response_class( render_template(templates[use_mimetype], **result), status=status_code or 200, headers=headers, mimetype=default_mimetype if use_mimetype == '*/*' else use_mimetype) return rendered else: return result return decorated_function return inner
Decorator to render the wrapped function with the given template (or dictionary of mimetype keys to templates, where the template is a string name of a template file or a callable that returns a Response). The function's return value must be a dictionary and is passed to the template as parameters. Callable templates get a single parameter with the function's return value. Usage:: @app.route('/myview') @render_with('myview.html') def myview(): return {'data': 'value'} @app.route('/myview_with_json') @render_with('myview.html', json=True) def myview_no_json(): return {'data': 'value'} @app.route('/otherview') @render_with({ 'text/html': 'otherview.html', 'text/xml': 'otherview.xml'}) def otherview(): return {'data': 'value'} @app.route('/404view') @render_with('myview.html') def myview(): return {'error': '404 Not Found'}, 404 @app.route('/headerview') @render_with('myview.html') def myview(): return {'data': 'value'}, 200, {'X-Header': 'Header value'} When a mimetype is specified and the template is not a callable, the response is returned with the same mimetype. Callable templates must return Response objects to ensure the correct mimetype is set. If a dictionary of templates is provided and does not include a handler for ``*/*``, render_with will attempt to use the handler for (in order) ``text/html``, ``text/plain`` and the various JSON types, falling back to rendering the value into a unicode string. If the method is called outside a request context, the wrapped method's original return value is returned. This is meant to facilitate testing and should not be used to call the method from within another view handler as the presence of a request context will trigger template rendering. Rendering may also be suspended by calling the view handler with ``_render=False``. render_with provides JSON and JSONP handlers for the ``application/json``, ``text/json`` and ``text/x-json`` mimetypes if ``json`` or ``jsonp`` is True (default is False). :param template: Single template, or dictionary of MIME type to templates. If the template is a callable, it is called with the output of the wrapped function :param json: Helper to add a JSON handler (default is False) :param jsonp: Helper to add a JSONP handler (if True, also provides JSON, default is False)
def getStartNodes(fdefs,calls): '''Return a list of nodes in fdefs that have no inbound edges''' s=[] for source in fdefs: for fn in fdefs[source]: inboundEdges=False for call in calls: if call.target==fn: inboundEdges=True if not inboundEdges: s.append(fn) return s
Return a list of nodes in fdefs that have no inbound edges
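A toy usage sketch for getStartNodes above; the Call namedtuple is hypothetical and merely stands in for whatever call objects carry the .target attribute the loop inspects.

from collections import namedtuple

Call = namedtuple("Call", ["source", "target"])
fdefs = {"app.py": ["main", "helper"], "util.py": ["parse"]}
calls = [Call("main", "helper"), Call("main", "parse")]
# Only "main" has no inbound call edge, so it is the only start node.
print(getStartNodes(fdefs, calls))  # ['main']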
def _queue_into_buffer(transfersession): """ Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance. """ last_saved_by_conditions = [] filter_prefixes = Filter(transfersession.filter) server_fsic = json.loads(transfersession.server_fsic) client_fsic = json.loads(transfersession.client_fsic) if transfersession.push: fsics = _fsic_queuing_calc(client_fsic, server_fsic) else: fsics = _fsic_queuing_calc(server_fsic, client_fsic) # if fsics are identical or receiving end has newer data, then there is nothing to queue if not fsics: return # create condition for all push FSICs where instance_ids are equal, but internal counters are higher than FSICs counters for instance, counter in six.iteritems(fsics): last_saved_by_conditions += ["(last_saved_instance = '{0}' AND last_saved_counter > {1})".format(instance, counter)] if fsics: last_saved_by_conditions = [_join_with_logical_operator(last_saved_by_conditions, 'OR')] partition_conditions = [] # create condition for filtering by partitions for prefix in filter_prefixes: partition_conditions += ["partition LIKE '{}%'".format(prefix)] if filter_prefixes: partition_conditions = [_join_with_logical_operator(partition_conditions, 'OR')] # combine conditions fsic_and_partition_conditions = _join_with_logical_operator(last_saved_by_conditions + partition_conditions, 'AND') # filter by profile where_condition = _join_with_logical_operator([fsic_and_partition_conditions, "profile = '{}'".format(transfersession.sync_session.profile)], 'AND') # execute raw sql to take all records that match condition, to be put into buffer for transfer with connection.cursor() as cursor: queue_buffer = """INSERT INTO {outgoing_buffer} (model_uuid, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, transfer_session_id, _self_ref_fk) SELECT id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data, '{transfer_session_id}', _self_ref_fk FROM {store} WHERE {condition}""".format(outgoing_buffer=Buffer._meta.db_table, transfer_session_id=transfersession.id, condition=where_condition, store=Store._meta.db_table) cursor.execute(queue_buffer) # take all record max counters that are foreign keyed onto store models, which were queued into the buffer queue_rmc_buffer = """INSERT INTO {outgoing_rmcb} (instance_id, counter, transfer_session_id, model_uuid) SELECT instance_id, counter, '{transfer_session_id}', store_model_id FROM {record_max_counter} AS rmc INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid WHERE buffer.transfer_session_id = '{transfer_session_id}' """.format(outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table, transfer_session_id=transfersession.id, record_max_counter=RecordMaxCounter._meta.db_table, outgoing_buffer=Buffer._meta.db_table) cursor.execute(queue_rmc_buffer)
Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance.
def from_zeros(self, lmax, gm, r0, omega=None, errors=False, normalization='4pi', csphase=1): """ Initialize the class with spherical harmonic coefficients set to zero from degree 1 to lmax, and set the degree 0 term to 1. Usage ----- x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors, normalization, csphase]) Returns ------- x : SHGravCoeffs class instance. Parameters ---------- lmax : int The maximum spherical harmonic degree l of the coefficients. gm : float The gravitational constant times the mass that is associated with the gravitational potential coefficients. r0 : float The reference radius of the spherical harmonic coefficients. omega : float, optional, default = None The angular rotation rate of the body. errors : bool, optional, default = False If True, initialize the attribute errors with zeros. normalization : str, optional, default = '4pi' '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized, Schmidt semi-normalized, or unnormalized coefficients, respectively. csphase : int, optional, default = 1 Condon-Shortley phase convention: 1 to exclude the phase factor, or -1 to include it. """ if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'): raise ValueError( "The normalization must be '4pi', 'ortho', 'schmidt', " "or 'unnorm'. Input value was {:s}." .format(repr(normalization)) ) if csphase != 1 and csphase != -1: raise ValueError( "csphase must be either 1 or -1. Input value was {:s}." .format(repr(csphase)) ) if normalization.lower() == 'unnorm' and lmax > 85: _warnings.warn("Calculations using unnormalized coefficients " "are stable only for degrees less than or equal " "to 85. lmax for the coefficients will be set to " "85. Input value was {:d}.".format(lmax), category=RuntimeWarning) lmax = 85 coeffs = _np.zeros((2, lmax + 1, lmax + 1)) coeffs[0, 0, 0] = 1.0 if errors is False: clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega, normalization=normalization.lower(), csphase=csphase) else: clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega, errors=_np.zeros((2, lmax + 1, lmax + 1)), normalization=normalization.lower(), csphase=csphase) return clm
Initialize the class with spherical harmonic coefficients set to zero from degree 1 to lmax, and set the degree 0 term to 1. Usage ----- x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors, normalization, csphase]) Returns ------- x : SHGravCoeffs class instance. Parameters ---------- lmax : int The maximum spherical harmonic degree l of the coefficients. gm : float The gravitational constant times the mass that is associated with the gravitational potential coefficients. r0 : float The reference radius of the spherical harmonic coefficients. omega : float, optional, default = None The angular rotation rate of the body. errors : bool, optional, default = False If True, initialize the attribute errors with zeros. normalization : str, optional, default = '4pi' '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized, Schmidt semi-normalized, or unnormalized coefficients, respectively. csphase : int, optional, default = 1 Condon-Shortley phase convention: 1 to exclude the phase factor, or -1 to include it.
def _ask_for_ledger_status(self, node_name: str, ledger_id): """ Ask other node for LedgerStatus """ self.request_msg(LEDGER_STATUS, {f.LEDGER_ID.nm: ledger_id}, [node_name, ]) logger.info("{} asking {} for ledger status of ledger {}".format(self, node_name, ledger_id))
Ask other node for LedgerStatus
def reissue(self, order_id, csr, software_id, organization_handle, approver_email=None, signature_hash_algorithm=None, domain_validation_methods=None, hostnames=None, technical_handle=None): """Reissue an SSL certificate order""" response = self.request(E.reissueSslCertRequest( E.id(order_id), E.csr(csr), E.softwareId(software_id), E.organizationHandle(organization_handle), OE('approverEmail', approver_email), OE('signatureHashAlgorithm', signature_hash_algorithm), OE('domainValidationMethods', domain_validation_methods, transform=_domain_validation_methods), OE('hostNames', hostnames, transform=_simple_array), OE('technicalHandle', technical_handle), )) return int(response.data.id)
Reissue an SSL certificate order
def order_by(self, **kwargs): """ Analog to SQL "ORDER BY". +kwargs+ should only contain one item. examples) NO: repo.order_by() NO: repo.order_by(id="desc", name="asc") YES: repo.order_by(id="asc) """ if kwargs: col, order = kwargs.popitem() self.order_clause = "order by {col} {order} ".format( col=col, order=order) return self
Analog to SQL "ORDER BY". +kwargs+ should only contain one item. examples) NO: repo.order_by() NO: repo.order_by(id="desc", name="asc") YES: repo.order_by(id="asc)
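A minimal sketch of the clause order_by builds, attaching the function above to a throwaway class purely for illustration (the real repository class is not shown here).

class FakeRepo:
    def __init__(self):
        self.order_clause = ""

FakeRepo.order_by = order_by  # reuse the method above on the stand-in class
repo = FakeRepo()
repo.order_by(id="asc")
print(repo.order_clause)  # "order by id asc "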
def move_file_to_file(old_path, new_path): """Moves file from old location to new one :param old_path: path of file to move :param new_path: new path """ try: os.rename(old_path, new_path) except: old_file = os.path.basename(old_path) target_directory, target_file = os.path.dirname( os.path.abspath(new_path)), os.path.basename(new_path) Document.move_file_to_directory( old_path, target_directory ) # move old file to new directory, change name to new name os.rename(os.path.join(target_directory, old_file), os.path.join(target_directory, target_file))
Moves file from old location to new one :param old_path: path of file to move :param new_path: new path
def parse_args(arguments=None, root=None, apply_config=False): """Parse the arguments from the CLI. If apply_config then we first look up and apply configs using apply_config_defaults. """ if arguments is None: arguments = [] parser = create_parser() args = parser.parse_args(arguments) if apply_config: parser = apply_config_defaults(parser, args, root=root) args = parser.parse_args(arguments) # sanity check args (from autopep8) if args.max_line_length <= 0: # pragma: no cover parser.error('--max-line-length must be greater than 0') if args.select: args.select = _split_comma_separated(args.select) if args.ignore: args.ignore = _split_comma_separated(args.ignore) elif not args.select and args.aggressive: # Enable everything by default if aggressive. args.select = ['E', 'W'] else: args.ignore = _split_comma_separated(DEFAULT_IGNORE) if args.exclude: args.exclude = _split_comma_separated(args.exclude) else: args.exclude = [] return args
Parse the arguments from the CLI. If apply_config then we first look up and apply configs using apply_config_defaults.
def filter_db_names(paths: List[str]) -> List[str]: """Returns a filtered list of `paths`, where every name matches our format. Args: paths: A list of file names. """ return [ db_path for db_path in paths if VERSION_RE.match(os.path.basename(db_path)) ]
Returns a filtered list of `paths`, where every name matches our format. Args: paths: A list of file names.
def to_op(self): """ Extracts the modification operation from the set. :rtype: dict, None """ if not self._adds and not self._removes: return None changes = {} if self._adds: changes['adds'] = list(self._adds) if self._removes: changes['removes'] = list(self._removes) return changes
Extracts the modification operation from the set. :rtype: dict, None
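A small sketch of the change dict this method builds, with plain sets standing in for the wrapper's internal _adds/_removes state (assumed attributes, illustrative values).

# Stand-ins for the internal state of the set wrapper.
_adds = {"a", "b"}
_removes = {"c"}

changes = {}
if _adds:
    changes['adds'] = list(_adds)
if _removes:
    changes['removes'] = list(_removes)
print(changes)  # e.g. {'adds': ['a', 'b'], 'removes': ['c']}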
def send_message(self, message, room_id, **kwargs): """ Send a message to a given room """ return SendMessage(settings=self.settings, **kwargs).call( message=message, room_id=room_id, **kwargs )
Send a message to a given room
def index_document(self, text, url): "Index the text of a document." ## For now, use first line for title title = text[:text.index('\n')].strip() docwords = words(text) docid = len(self.documents) self.documents.append(Document(title, url, len(docwords))) for word in docwords: if word not in self.stopwords: self.index[word][docid] += 1
Index the text of a document.
def get_file(self, fax_id, **kwargs):  # noqa: E501
    """get a file  # noqa: E501

    Get your fax archive file using its id.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_file(fax_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str fax_id: (required)
    :param str format: can be 'pdf' or 'tiff'
    :return: file
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return self.get_file_with_http_info(fax_id, **kwargs)  # noqa: E501
    else:
        (data) = self.get_file_with_http_info(fax_id, **kwargs)  # noqa: E501
        return data
get a file  # noqa: E501

Get your fax archive file using its id.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_file(fax_id, async=True)
>>> result = thread.get()

:param async bool
:param str fax_id: (required)
:param str format: can be 'pdf' or 'tiff'
:return: file
         If the method is called asynchronously,
         returns the request thread.
def to_singular(word): """Attempts to singularize a word.""" if word[-1] != "s": return word elif word.endswith("ies"): return word[:-3] + "y" elif word.endswith("ses"): return word[:-2] else: return word[:-1]
Attempts to singularize a word.
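A few worked inputs make the suffix heuristic concrete; the function is repeated here only so the demo runs on its own, and the last case shows a known limitation of rules of this kind.

def to_singular(word):
    if word[-1] != "s":
        return word
    elif word.endswith("ies"):
        return word[:-3] + "y"
    elif word.endswith("ses"):
        return word[:-2]
    else:
        return word[:-1]

print(to_singular("parties"))  # party
print(to_singular("glasses"))  # glass
print(to_singular("cats"))     # cat
print(to_singular("boxes"))    # boxe  (suffix rules only approximate English plurals)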
def multihead_attention_2d(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, output_depth, num_heads, attention_type="local_attention_2d", query_shape=(8, 16), memory_flange=(8, 16), name=None): """2d Multihead scaled-dot-product attention with inp/output transformations. Args: query_antecedent: a Tensor with shape [batch, h, w, depth_k] memory_antecedent: a Tensor with shape [batch, h, w, depth_k] total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth attention_type: String, type of attention function to use. query_shape: an tuple indicating the height and width of each query block. memory_flange: an integer indicating how much to look in height and width name: an optional string Returns: A Tensor of shape [batch, h, w, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) with tf.variable_scope( name, default_name="multihead_attention_2d", values=[query_antecedent, memory_antecedent]): q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth) # after splitting, shape is [batch, heads, h, w, depth] q = split_heads_2d(q, num_heads) k = split_heads_2d(k, num_heads) v = split_heads_2d(v, num_heads) key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 if attention_type == "local_attention_2d": x = local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) elif attention_type == "masked_local_attention_2d": assert attention_type == "masked_local_attention_2d" x = masked_local_attention_2d( q, k, v, query_shape=query_shape, memory_flange=memory_flange) else: assert attention_type == "unmasked_local_attention_2d_tpu" x = dot_product_unmasked_attention_local_2d_tpu( q, k, v, None, max_relative_position=None, query_shape=query_shape) x = combine_heads_2d(x) x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
2d Multihead scaled-dot-product attention with inp/output transformations.

Args:
  query_antecedent: a Tensor with shape [batch, h, w, depth_k]
  memory_antecedent: a Tensor with shape [batch, h, w, depth_k]
  total_key_depth: an integer
  total_value_depth: an integer
  output_depth: an integer
  num_heads: an integer dividing total_key_depth and total_value_depth
  attention_type: String, type of attention function to use.
  query_shape: a tuple indicating the height and width of each query block.
  memory_flange: an integer indicating how much to look in height and width
  name: an optional string

Returns:
  A Tensor of shape [batch, h, w, output_depth]

Raises:
  ValueError: if the key depth or value depth are not divisible by the
    number of attention heads.
def backward(self, speed=1): """ Drive the robot backward by running both motors backward. :param float speed: Speed at which to drive the motors, as a value between 0 (stopped) and 1 (full speed). The default is 1. """ self.left_motor.backward(speed) self.right_motor.backward(speed)
Drive the robot backward by running both motors backward. :param float speed: Speed at which to drive the motors, as a value between 0 (stopped) and 1 (full speed). The default is 1.
def extract(self, variable_idx):
    """ Extract a specific variable """
    branch = self._define_branch(variable_idx)
    label = self.profiles[variable_idx].replace("\n", "")
    self.label[variable_idx] = label
    self.data[variable_idx] = [[], []]
    with open(self.abspath) as fobj:
        for line in fobj.readlines()[
                variable_idx+1+self._attributes['data_idx']::
                self._attributes['nvar']+1]:
            points = []
            for point in line.split(' '):
                try:
                    points.append(float(point))
                except ValueError:
                    pass
            self.data[variable_idx][1].append(np.array(points))
    x_st = self.geometries[branch][0]
    x_no_st = [(x0+x1)/2 for x0, x1 in zip(x_st[:-1], x_st[1:])]
    if len(self.data[variable_idx][1][0]) == len(x_st):
        self.data[variable_idx][0] = np.array(x_st)
    else:
        self.data[variable_idx][0] = np.array(x_no_st)
Extract a specific variable
def loadTextureD3D11_Async(self, textureId, pD3D11Device): """Creates a D3D11 texture and loads data into it.""" fn = self.function_table.loadTextureD3D11_Async ppD3D11Texture2D = c_void_p() result = fn(textureId, pD3D11Device, byref(ppD3D11Texture2D)) return result, ppD3D11Texture2D.value
Creates a D3D11 texture and loads data into it.
def send_loop(): """ Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and sending messages if any are on queue. """ while True: while not Message.objects.all(): logging.debug("sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP) time.sleep(EMPTY_QUEUE_SLEEP) send_all()
Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and sending messages if any are on queue.
def _get_I(self, a, b, size, plus_transpose=True):
    """Return the I matrix in Chaput's PRL paper.

    None is returned if I is a zero matrix.

    """
    r_sum = np.zeros((3, 3), dtype='double', order='C')
    for r in self._rotations_cartesian:
        for i in range(3):
            for j in range(3):
                r_sum[i, j] += r[a, i] * r[b, j]
    if plus_transpose:
        r_sum += r_sum.T

    # Return None to avoid spending time on diagonalizing a zero matrix
    if (np.abs(r_sum) < 1e-10).all():
        return None

    # Same as np.kron(np.eye(size), r_sum), but written as below
    # to be sure the values in memory are C-contiguous with dtype 'double'.
    I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
    for i in range(size):
        I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum

    return I_mat
Return the I matrix in Chaput's PRL paper.

None is returned if I is a zero matrix.
def byte_size(self, selection=False, virtual=False): """Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.""" bytes_per_row = 0 N = self.count(selection=selection) extra = 0 for column in list(self.get_column_names(virtual=virtual)): dtype = self.dtype(column) dtype_internal = self.dtype(column, internal=True) #if dtype in [str_type, str] and dtype_internal.kind == 'O': if isinstance(self.columns[column], ColumnString): # TODO: document or fix this # is it too expensive to calculate this exactly? extra += self.columns[column].nbytes else: bytes_per_row += dtype_internal.itemsize if np.ma.isMaskedArray(self.columns[column]): bytes_per_row += 1 return bytes_per_row * self.count(selection=selection) + extra
Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.
def CreateBitmap(self, artid, client, size):
    """Adds custom images to the ArtProvider"""
    if artid in self.extra_icons:
        return wx.Bitmap(self.extra_icons[artid], wx.BITMAP_TYPE_ANY)
    else:
        return wx.ArtProvider.GetBitmap(artid, client, size)
Adds custom images to the ArtProvider
def read_version():
    """Read version from the first line starting with a digit
    """
    regex = re.compile(r'^(?P<number>\d.*?) .*$')
    with open('../CHANGELOG.rst') as f:
        for line in f:
            match = regex.match(line)
            if match:
                return match.group('number')
Read version from the first line starting with a digit
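A quick illustration of the regex against hypothetical CHANGELOG lines; the real file contents are an assumption here.

import re

regex = re.compile(r'^(?P<number>\d.*?) .*$')
# Hypothetical first lines of a CHANGELOG.rst file.
lines = ["Changelog", "=========", "1.2.3 (2019-01-01)", "older entries ..."]
for line in lines:
    match = regex.match(line)
    if match:
        print(match.group('number'))  # -> 1.2.3
        break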
def request( self, method, url, data=None, headers=None, withhold_token=False, client_id=None, client_secret=None, **kwargs ): """Intercept all requests and add the OAuth 2 token if present.""" if not is_secure_transport(url): raise InsecureTransportError() if self.token and not withhold_token: log.debug( "Invoking %d protected resource request hooks.", len(self.compliance_hook["protected_request"]), ) for hook in self.compliance_hook["protected_request"]: log.debug("Invoking hook %s.", hook) url, headers, data = hook(url, headers, data) log.debug("Adding token %s to request.", self.token) try: url, headers, data = self._client.add_token( url, http_method=method, body=data, headers=headers ) # Attempt to retrieve and save new access token if expired except TokenExpiredError: if self.auto_refresh_url: log.debug( "Auto refresh is set, attempting to refresh at %s.", self.auto_refresh_url, ) # We mustn't pass auth twice. auth = kwargs.pop("auth", None) if client_id and client_secret and (auth is None): log.debug( 'Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id, ) auth = requests.auth.HTTPBasicAuth(client_id, client_secret) token = self.refresh_token( self.auto_refresh_url, auth=auth, **kwargs ) if self.token_updater: log.debug( "Updating token to %s using %s.", token, self.token_updater ) self.token_updater(token) url, headers, data = self._client.add_token( url, http_method=method, body=data, headers=headers ) else: raise TokenUpdated(token) else: raise log.debug("Requesting url %s using method %s.", url, method) log.debug("Supplying headers %s and data %s", headers, data) log.debug("Passing through key word arguments %s.", kwargs) return super(OAuth2Session, self).request( method, url, headers=headers, data=data, **kwargs )
Intercept all requests and add the OAuth 2 token if present.
def parse(self, line): """Parse a line of the Nginx error log""" csv_list = line.split(",") date_time_message = csv_list.pop(0).split(" ", 2) otherinfo = dict() for item in csv_list: key_value_pair = item.split(":", 1) key = key_value_pair[0].strip() if len(key_value_pair) > 1: value = key_value_pair[1].strip() if not value: value = "-" else: value = "-" otherinfo[key] = value self.message = '%s\n' \ 'Date: %s\n' \ 'Time: %s\n' \ 'Request: %s\n' \ 'Referrer: %s\n' \ 'Server: %s\n' \ 'Client: %s\n' \ 'Host: %s\n' \ 'Upstream: %s\n' self.params = [ date_time_message[2], date_time_message[0], date_time_message[1], otherinfo.get("request", "-"), otherinfo.get("referrer", "-"), otherinfo.get("server", "-"), otherinfo.get("client", "-"), otherinfo.get("host", "-"), otherinfo.get("upstream", "-"), ] self.site = otherinfo.get("referrer", "-")
Parse a line of the Nginx error log
def get_tag(note_store, my_tags):
    """
    Get the tags from the user's Evernote account

    :param note_store Evernote Instance
    :param my_tags string
    :return: list of the matching tag GUIDs
    """
    tag_id = []
    listtags = note_store.listTags()
    # split the comma-separated string into individual tag names
    for my_tag in my_tags.split(','):
        for tag in listtags:
            # remove space before and after
            # thus we keep "foo bar"
            # but not " foo bar" nor "foo bar "
            if tag.name.lower() == my_tag.lower().lstrip().rstrip():
                tag_id.append(tag.guid)
                break
    return tag_id
Get the tags from the user's Evernote account

:param note_store Evernote Instance
:param my_tags string
:return: list of the matching tag GUIDs
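A standalone sketch of the name-matching loop; plain dicts stand in for the Evernote note_store and its Tag objects, which are assumptions made only for this demo.

# Dicts stand in for Evernote Tag objects (hypothetical data).
listtags = [{"name": "Foo Bar", "guid": "g1"}, {"name": "baz", "guid": "g2"}]
my_tags = "foo bar , baz"

tag_id = []
for my_tag in my_tags.split(','):
    for tag in listtags:
        # leading/trailing spaces are stripped so " foo bar " still matches "Foo Bar"
        if tag["name"].lower() == my_tag.lower().strip():
            tag_id.append(tag["guid"])
            break
print(tag_id)  # -> ['g1', 'g2']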
def __record_progress(self, next_step=None): """ __record_progress: save progress to respective restoration file Args: None Returns: None """ config.SUSHI_BAR_CLIENT.report_progress( self.get_status(), self.get_status().value/Status.DONE.value) if next_step: now = time.time() config.SUSHI_BAR_CLIENT.report_stage(self.get_status(), now - self.timestamp) self.timestamp = now self.status = next_step with open(self.get_restore_path(Status.LAST), 'wb') as handle, open(self.get_restore_path(), 'wb') as step_handle: pickle.dump(self, handle) pickle.dump(self, step_handle)
__record_progress: save progress to respective restoration file Args: None Returns: None
def trace(self, data, callback=None): """Queue data for tracing Args: data (bytearray, string): Unstructured data to trace to any connected client. callback (callable): An optional callback that will be called with a bool value of True when this data actually gets traced. If the client disconnects and the data is dropped instead, callback will be called with False. """ conn_id = self._find_connection(self.conn_string) if conn_id is not None: self.adapter.notify_event_nowait(self.conn_string, 'trace', data) if callback is not None: callback(conn_id is not None)
Queue data for tracing Args: data (bytearray, string): Unstructured data to trace to any connected client. callback (callable): An optional callback that will be called with a bool value of True when this data actually gets traced. If the client disconnects and the data is dropped instead, callback will be called with False.
def on_graphs_menu_close(self, update):
    """Return to the main screen and update the sensors that are active in the view"""
    logging.info("closing sensor menu, update=%s", update)
    if update:
        for sensor, visible_sensors in \
                self.graphs_menu.active_sensors.items():
            self.graphs[sensor].set_visible_graphs(visible_sensors)
            # If no sensor is selected, do not display the graph
            if sensor in self.visible_graphs and not any(visible_sensors):
                del self.visible_graphs[sensor]
            elif not any(visible_sensors):
                pass
            # Update visible graphs if a sensor was selected
            else:
                self.visible_graphs[sensor] = self.graphs[sensor]
        self.show_graphs()

    self.original_widget = self.main_window_w
Return to the main screen and update the sensors that are active in the view
def pointerEvent(self, x, y, buttonmask=0): """Indicates either pointer movement or a pointer button press or release. The pointer is now at (x-position, y-position), and the current state of buttons 1 to 8 are represented by bits 0 to 7 of button-mask respectively, 0 meaning up, 1 meaning down (pressed). """ self.transport.write(pack("!BBHH", 5, buttonmask, x, y))
Indicates either pointer movement or a pointer button press or release. The pointer is now at (x-position, y-position), and the current state of buttons 1 to 8 are represented by bits 0 to 7 of button-mask respectively, 0 meaning up, 1 meaning down (pressed).
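A small sketch of the wire encoding, assuming message type 5 as used above and buttons 1 and 3 held down at position (100, 200).

from struct import pack

# Bit 0 represents button 1, bit 2 represents button 3 (1 = pressed).
buttonmask = (1 << 0) | (1 << 2)
message = pack("!BBHH", 5, buttonmask, 100, 200)
print(message.hex())  # -> 0505006400c8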
def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left', separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x): '''Indents a table by column. - rows: A sequence of sequences of items, one sequence per row. - hasHeader: True if the first row consists of the columns' names. - headerChar: Character to be used for the row separator line (if hasHeader==True or separateRows==True). - delim: The column delimiter. - justify: Determines how are data justified in their column. Valid values are 'left','right' and 'center'. - separateRows: True if rows are to be separated by a line of 'headerChar's. - prefix: A string prepended to each printed row. - postfix: A string appended to each printed row. - wrapfunc: A function f(text) for wrapping text; each element in the table is first wrapped by this function.''' # closure for breaking logical rows to physical, using wrapfunc def rowWrapper(row): newRows = [wrapfunc(item).split('\n') for item in row] return [[substr or '' for substr in item] for item in map(None, *newRows)] # NOQA # break each logical row into one or more physical ones logicalRows = [rowWrapper(row) for row in rows] # columns of physical rows columns = map(None, *reduce(operator.add, logicalRows)) # get the maximum of each column by the string length of its items maxWidths = [max([len(str(item)) for item in column]) for column in columns] rowSeparator = headerChar * (len(prefix) + len(postfix) + sum(maxWidths) + len(delim)*(len(maxWidths)-1)) # select the appropriate justify method justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()] # NOQA output = cStringIO.StringIO() if separateRows: print >> output, rowSeparator for physicalRows in logicalRows: for row in physicalRows: print >> output, prefix \ + delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)]) + postfix # NOQA if separateRows or hasHeader: print >> output, rowSeparator hasHeader = False return output.getvalue()
Indents a table by column. - rows: A sequence of sequences of items, one sequence per row. - hasHeader: True if the first row consists of the columns' names. - headerChar: Character to be used for the row separator line (if hasHeader==True or separateRows==True). - delim: The column delimiter. - justify: Determines how are data justified in their column. Valid values are 'left','right' and 'center'. - separateRows: True if rows are to be separated by a line of 'headerChar's. - prefix: A string prepended to each printed row. - postfix: A string appended to each printed row. - wrapfunc: A function f(text) for wrapping text; each element in the table is first wrapped by this function.
def register_on_medium_changed(self, callback): """Set the callback function to consume on medium changed events. Callback receives a IMediumChangedEvent object. Returns the callback_id """ event_type = library.VBoxEventType.on_medium_changed return self.event_source.register_callback(callback, event_type)
Set the callback function to consume on medium changed events. Callback receives a IMediumChangedEvent object. Returns the callback_id
def display_results(repo_name, contributors, api_len): """ Fancy display. """ print("\n") print("All Contributors:") # Sort and consolidate on Name seen = [] for user in sorted(contributors, key=_sort_by_name): if user.get("name"): key = user["name"] else: key = user["user_name"] if key not in seen: seen.append(key) if key != user["user_name"]: print("%s (%s)" % (user["name"], user["user_name"])) else: print(user["user_name"]) print("") print("Repo: %s" % repo_name) print("GitHub Contributors: %s" % api_len) print("All Contributors: %s 👏" % len(seen))
Fancy display.
def get_clean_factor_and_forward_returns(factor, prices, groupby=None, binning_by_group=False, quantiles=5, bins=None, periods=(1, 5, 10), filter_zscore=20, groupby_labels=None, max_loss=0.35, zero_aware=False, cumulative_returns=True): """ Formats the factor data, pricing data, and group mappings into a DataFrame that contains aligned MultiIndex indices of timestamp and asset. The returned data will be formatted to be suitable for Alphalens functions. It is safe to skip a call to this function and still make use of Alphalens functionalities as long as the factor data conforms to the format returned from get_clean_factor_and_forward_returns and documented here Parameters ---------- factor : pd.Series - MultiIndex A MultiIndex Series indexed by timestamp (level 0) and asset (level 1), containing the values for a single alpha factor. :: ----------------------------------- date | asset | ----------------------------------- | AAPL | 0.5 ----------------------- | BA | -1.1 ----------------------- 2014-01-01 | CMG | 1.7 ----------------------- | DAL | -0.1 ----------------------- | LULU | 2.7 ----------------------- prices : pd.DataFrame A wide form Pandas DataFrame indexed by timestamp with assets in the columns. Pricing data must span the factor analysis time period plus an additional buffer window that is greater than the maximum number of expected periods in the forward returns calculations. It is important to pass the correct pricing data in depending on what time of period your signal was generated so to avoid lookahead bias, or delayed calculations. 'Prices' must contain at least an entry for each timestamp/asset combination in 'factor'. This entry should reflect the buy price for the assets and usually it is the next available price after the factor is computed but it can also be a later price if the factor is meant to be traded later (e.g. if the factor is computed at market open but traded 1 hour after market open the price information should be 1 hour after market open). 'Prices' must also contain entries for timestamps following each timestamp/asset combination in 'factor', as many more timestamps as the maximum value in 'periods'. The asset price after 'period' timestamps will be considered the sell price for that asset when computing 'period' forward returns. :: ---------------------------------------------------- | AAPL | BA | CMG | DAL | LULU | ---------------------------------------------------- Date | | | | | | ---------------------------------------------------- 2014-01-01 |605.12| 24.58| 11.72| 54.43 | 37.14 | ---------------------------------------------------- 2014-01-02 |604.35| 22.23| 12.21| 52.78 | 33.63 | ---------------------------------------------------- 2014-01-03 |607.94| 21.68| 14.36| 53.94 | 29.37 | ---------------------------------------------------- groupby : pd.Series - MultiIndex or dict Either A MultiIndex Series indexed by date and asset, containing the period wise group codes for each asset, or a dict of asset to group mappings. If a dict is passed, it is assumed that group mappings are unchanged for the entire time period of the passed factor data. binning_by_group : bool If True, compute quantile buckets separately for each group. This is useful when the factor values range vary considerably across gorups so that it is wise to make the binning group relative. 
You should probably enable this if the factor is intended to be analyzed for a group neutral portfolio quantiles : int or sequence[float] Number of equal-sized quantile buckets to use in factor bucketing. Alternately sequence of quantiles, allowing non-equal-sized buckets e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95] Only one of 'quantiles' or 'bins' can be not-None bins : int or sequence[float] Number of equal-width (valuewise) bins to use in factor bucketing. Alternately sequence of bin edges allowing for non-uniform bin width e.g. [-4, -2, -0.5, 0, 10] Chooses the buckets to be evenly spaced according to the values themselves. Useful when the factor contains discrete values. Only one of 'quantiles' or 'bins' can be not-None periods : sequence[int] periods to compute forward returns on. filter_zscore : int or float, optional Sets forward returns greater than X standard deviations from the the mean to nan. Set it to 'None' to avoid filtering. Caution: this outlier filtering incorporates lookahead bias. groupby_labels : dict A dictionary keyed by group code with values corresponding to the display name for each group. max_loss : float, optional Maximum percentage (0.00 to 1.00) of factor data dropping allowed, computed comparing the number of items in the input factor index and the number of items in the output DataFrame index. Factor data can be partially dropped due to being flawed itself (e.g. NaNs), not having provided enough price data to compute forward returns for all factor values, or because it is not possible to perform binning. Set max_loss=0 to avoid Exceptions suppression. zero_aware : bool, optional If True, compute quantile buckets separately for positive and negative signal values. This is useful if your signal is centered and zero is the separation between long and short signals, respectively. cumulative_returns : bool, optional If True, forward returns columns will contain cumulative returns. Setting this to False is useful if you want to analyze how predictive a factor is for a single forward day. Returns ------- merged_data : pd.DataFrame - MultiIndex A MultiIndex Series indexed by date (level 0) and asset (level 1), containing the values for a single alpha factor, forward returns for each period, the factor quantile/bin that factor value belongs to, and (optionally) the group the asset belongs to. - forward returns column names follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc) - 'date' index freq property (merged_data.index.levels[0].freq) will be set to a trading calendar (pandas DateOffset) inferred from the input data (see infer_trading_calendar for more details). 
This is currently used only in cumulative returns computation :: ------------------------------------------------------------------- | | 1D | 5D | 10D |factor|group|factor_quantile ------------------------------------------------------------------- date | asset | | | | | | ------------------------------------------------------------------- | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3 -------------------------------------------------------- | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5 -------------------------------------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1 -------------------------------------------------------- | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5 -------------------------------------------------------- | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2 -------------------------------------------------------- """ forward_returns = compute_forward_returns(factor, prices, periods, filter_zscore, cumulative_returns) factor_data = get_clean_factor(factor, forward_returns, groupby=groupby, groupby_labels=groupby_labels, quantiles=quantiles, bins=bins, binning_by_group=binning_by_group, max_loss=max_loss, zero_aware=zero_aware) return factor_data
Formats the factor data, pricing data, and group mappings into a DataFrame that contains aligned MultiIndex indices of timestamp and asset. The returned data will be formatted to be suitable for Alphalens functions. It is safe to skip a call to this function and still make use of Alphalens functionalities as long as the factor data conforms to the format returned from get_clean_factor_and_forward_returns and documented here Parameters ---------- factor : pd.Series - MultiIndex A MultiIndex Series indexed by timestamp (level 0) and asset (level 1), containing the values for a single alpha factor. :: ----------------------------------- date | asset | ----------------------------------- | AAPL | 0.5 ----------------------- | BA | -1.1 ----------------------- 2014-01-01 | CMG | 1.7 ----------------------- | DAL | -0.1 ----------------------- | LULU | 2.7 ----------------------- prices : pd.DataFrame A wide form Pandas DataFrame indexed by timestamp with assets in the columns. Pricing data must span the factor analysis time period plus an additional buffer window that is greater than the maximum number of expected periods in the forward returns calculations. It is important to pass the correct pricing data in depending on what time of period your signal was generated so to avoid lookahead bias, or delayed calculations. 'Prices' must contain at least an entry for each timestamp/asset combination in 'factor'. This entry should reflect the buy price for the assets and usually it is the next available price after the factor is computed but it can also be a later price if the factor is meant to be traded later (e.g. if the factor is computed at market open but traded 1 hour after market open the price information should be 1 hour after market open). 'Prices' must also contain entries for timestamps following each timestamp/asset combination in 'factor', as many more timestamps as the maximum value in 'periods'. The asset price after 'period' timestamps will be considered the sell price for that asset when computing 'period' forward returns. :: ---------------------------------------------------- | AAPL | BA | CMG | DAL | LULU | ---------------------------------------------------- Date | | | | | | ---------------------------------------------------- 2014-01-01 |605.12| 24.58| 11.72| 54.43 | 37.14 | ---------------------------------------------------- 2014-01-02 |604.35| 22.23| 12.21| 52.78 | 33.63 | ---------------------------------------------------- 2014-01-03 |607.94| 21.68| 14.36| 53.94 | 29.37 | ---------------------------------------------------- groupby : pd.Series - MultiIndex or dict Either A MultiIndex Series indexed by date and asset, containing the period wise group codes for each asset, or a dict of asset to group mappings. If a dict is passed, it is assumed that group mappings are unchanged for the entire time period of the passed factor data. binning_by_group : bool If True, compute quantile buckets separately for each group. This is useful when the factor values range vary considerably across gorups so that it is wise to make the binning group relative. You should probably enable this if the factor is intended to be analyzed for a group neutral portfolio quantiles : int or sequence[float] Number of equal-sized quantile buckets to use in factor bucketing. Alternately sequence of quantiles, allowing non-equal-sized buckets e.g. [0, .10, .5, .90, 1.] 
or [.05, .5, .95] Only one of 'quantiles' or 'bins' can be not-None bins : int or sequence[float] Number of equal-width (valuewise) bins to use in factor bucketing. Alternately sequence of bin edges allowing for non-uniform bin width e.g. [-4, -2, -0.5, 0, 10] Chooses the buckets to be evenly spaced according to the values themselves. Useful when the factor contains discrete values. Only one of 'quantiles' or 'bins' can be not-None periods : sequence[int] periods to compute forward returns on. filter_zscore : int or float, optional Sets forward returns greater than X standard deviations from the the mean to nan. Set it to 'None' to avoid filtering. Caution: this outlier filtering incorporates lookahead bias. groupby_labels : dict A dictionary keyed by group code with values corresponding to the display name for each group. max_loss : float, optional Maximum percentage (0.00 to 1.00) of factor data dropping allowed, computed comparing the number of items in the input factor index and the number of items in the output DataFrame index. Factor data can be partially dropped due to being flawed itself (e.g. NaNs), not having provided enough price data to compute forward returns for all factor values, or because it is not possible to perform binning. Set max_loss=0 to avoid Exceptions suppression. zero_aware : bool, optional If True, compute quantile buckets separately for positive and negative signal values. This is useful if your signal is centered and zero is the separation between long and short signals, respectively. cumulative_returns : bool, optional If True, forward returns columns will contain cumulative returns. Setting this to False is useful if you want to analyze how predictive a factor is for a single forward day. Returns ------- merged_data : pd.DataFrame - MultiIndex A MultiIndex Series indexed by date (level 0) and asset (level 1), containing the values for a single alpha factor, forward returns for each period, the factor quantile/bin that factor value belongs to, and (optionally) the group the asset belongs to. - forward returns column names follow the format accepted by pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc) - 'date' index freq property (merged_data.index.levels[0].freq) will be set to a trading calendar (pandas DateOffset) inferred from the input data (see infer_trading_calendar for more details). This is currently used only in cumulative returns computation :: ------------------------------------------------------------------- | | 1D | 5D | 10D |factor|group|factor_quantile ------------------------------------------------------------------- date | asset | | | | | | ------------------------------------------------------------------- | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3 -------------------------------------------------------- | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5 -------------------------------------------------------- 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1 -------------------------------------------------------- | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5 -------------------------------------------------------- | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2 --------------------------------------------------------
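A minimal usage sketch, assuming this is the Alphalens utility importable from alphalens.utils and that pandas/numpy are installed; the synthetic prices, the toy momentum factor, and the parameter choices are illustrative only, and version-specific details (e.g. timezone handling of the date index) may differ.

import numpy as np
import pandas as pd
from alphalens.utils import get_clean_factor_and_forward_returns  # assumed import path

dates = pd.date_range('2014-01-01', periods=120, freq='B')
assets = ['AAPL', 'BA', 'CMG', 'DAL', 'LULU']
rng = np.random.RandomState(0)
prices = pd.DataFrame(
    100 * np.cumprod(1 + 0.01 * rng.randn(len(dates), len(assets)), axis=0),
    index=dates, columns=assets)

# Toy factor: 5-day momentum, reshaped into the (date, asset) MultiIndex Series described above.
factor = prices.pct_change(5).iloc[5:-10].stack()
factor.index = factor.index.set_names(['date', 'asset'])

factor_data = get_clean_factor_and_forward_returns(factor, prices,
                                                   quantiles=3, periods=(1, 5))
print(factor_data.head())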
def get_selection(self, name="default"): """Get the current selection object (mostly for internal use atm).""" name = _normalize_selection_name(name) selection_history = self.selection_histories[name] index = self.selection_history_indices[name] if index == -1: return None else: return selection_history[index]
Get the current selection object (mostly for internal use atm).
def wait_to_end(self, pids=[]): ''' wait_to_end(self, pids=[]) Wait for processes to finish :Parameters: * *pids* (`list`) -- list of processes to wait to finish ''' actual_pids = self._get_pids(pids) return self.wait_for(pids=actual_pids, status_list=process_result_statuses)
wait_to_end(self, pids=[]) Wait for processes to finish :Parameters: * *pids* (`list`) -- list of processes to wait to finish
def get_plugins(**kwargs):
    """
    Get all available plugins
    """
    plugins = []
    plugin_paths = []

    # Look in a directory or set of directories for
    # plugins
    base_plugin_dir = config.get('plugin', 'default_directory')
    plugin_xsd_path = config.get('plugin', 'plugin_xsd_path')

    base_plugin_dir_contents = os.listdir(base_plugin_dir)
    for directory in base_plugin_dir_contents:
        # ignore hidden files
        if directory[0] == '.' or directory == 'xml':
            continue
        # Is this a file or a directory? If it's a directory, it's a plugin.
        path = os.path.join(base_plugin_dir, directory)
        if os.path.isdir(path):
            plugin_paths.append(path)

    # For each plugin, get its details (an XML string)

    # Retrieve the xml schema for validating the XML to make sure
    # what is being provided to the UI is correct.
    xmlschema_doc = etree.parse(plugin_xsd_path)
    xmlschema = etree.XMLSchema(xmlschema_doc)

    # Get the xml description file from the plugin directory. If there
    # is no xml file, the plugin is unusable.
    for plugin_dir in plugin_paths:
        full_plugin_path = os.path.join(plugin_dir, 'trunk')
        dir_contents = os.listdir(full_plugin_path)
        # look for a plugin.xml file in the plugin directory
        for file_name in dir_contents:
            file_path = os.path.join(full_plugin_path, file_name)
            if file_name == 'plugin.xml':
                f = open(file_path, 'r')
                # validate the xml using the xml schema for defining
                # plugin details
                try:
                    y = open(file_path, 'r')
                    xml_tree = etree.parse(y)
                    xmlschema.assertValid(xml_tree)
                    plugins.append(etree.tostring(xml_tree))
                except Exception as e:
                    log.critical("Schema %s did not validate! (error was %s)"%(file_name, e))
                break
        else:
            log.warning("No xml plugin details found for %s. Ignoring", plugin_dir)

    return plugins
Get all available plugins
def supportsType(self, type_uri): """Does this endpoint support this type? I consider C{/server} endpoints to implicitly support C{/signon}. """ return ( (type_uri in self.type_uris) or (type_uri == OPENID_2_0_TYPE and self.isOPIdentifier()) )
Does this endpoint support this type? I consider C{/server} endpoints to implicitly support C{/signon}.
def _generate_token(self):
    """Create authentication to use with requests."""
    session = self.get_session()

    url = self.__base_url('magicBox.cgi?action=getMachineName')
    try:
        # try old basic method
        auth = requests.auth.HTTPBasicAuth(self._user, self._password)
        req = session.get(url, auth=auth, timeout=self._timeout_default)
        if not req.ok:
            # try new digest method
            auth = requests.auth.HTTPDigestAuth(
                self._user, self._password)
            req = session.get(
                url, auth=auth, timeout=self._timeout_default)
        req.raise_for_status()
    except requests.RequestException as error:
        _LOGGER.error(error)
        raise CommError('Could not communicate with camera')

    # check if user passed
    result = req.text.lower()
    if 'invalid' in result or 'error' in result:
        _LOGGER.error('Result from camera: %s',
                      req.text.strip().replace('\r\n', ': '))
        raise LoginError('Invalid credentials')

    return auth
Create authentication to use with requests.
def run(self, eps=1e-4, kill=True, max_steps=50, verbose=False): r"""Perform the clustering on the input components updating the initial guess. The result is available in the member ``self.g``. Return the number of iterations at convergence, or None. :param eps: If relative change of distance between current and last step falls below ``eps``, declare convergence: .. math:: 0 < \frac{d^t - d^{t-1}}{d^t} < \varepsilon :param kill: If a component is assigned zero weight (no input components), it is removed. :param max_steps: Perform a maximum number of update steps. :param verbose: Output information on progress of algorithm. """ old_distance = np.finfo(np.float64).max new_distance = np.finfo(np.float64).max if verbose: print('Starting hierarchical clustering with %d components.' % len(self.g.components)) converged = False for step in range(1, max_steps + 1): self._cleanup(kill, verbose) self._regroup() self._refit() new_distance = self._distance() assert new_distance >= 0, 'Found non-positive distance %d' % new_distance if verbose: print('Distance in step %d: %g' % (step, new_distance)) if new_distance == old_distance: converged = True if verbose: print('Exact minimum found after %d steps' % step) break rel_change = (old_distance - new_distance) / old_distance assert not (rel_change < -1e-13), 'distance increased' if rel_change < eps and not converged and step > 0: converged = True if verbose and new_distance != old_distance: print('Close enough to local minimum after %d steps' % step) break # save distance for comparison in next step old_distance = new_distance self._cleanup(kill, verbose) if verbose: print('%d components remain.' % len(self.g.components)) if converged: return step
r"""Perform the clustering on the input components updating the initial guess. The result is available in the member ``self.g``. Return the number of iterations at convergence, or None. :param eps: If relative change of distance between current and last step falls below ``eps``, declare convergence: .. math:: 0 < \frac{d^t - d^{t-1}}{d^t} < \varepsilon :param kill: If a component is assigned zero weight (no input components), it is removed. :param max_steps: Perform a maximum number of update steps. :param verbose: Output information on progress of algorithm.
def _padding_to_conv_op_padding(padding): """Whether to use SAME or VALID for the underlying convolution op. Args: padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from `_fill_and_verify_padding`. Returns: One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the underlying convolution op. Raises: ValueError: If padding is not a tuple. """ if not isinstance(padding, tuple): raise ValueError("padding should be a tuple.") if all(p == SAME for p in padding): # If we want SAME padding for all dimensions then we can use SAME for the # conv and avoid doing any extra padding. return SAME else: # Otherwise we prefer to use VALID, since we can implement all the other # padding types just by adding some extra padding before doing a VALID conv. # (We could use SAME but then we'd also have to crop outputs in some cases). return VALID
Whether to use SAME or VALID for the underlying convolution op. Args: padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from `_fill_and_verify_padding`. Returns: One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the underlying convolution op. Raises: ValueError: If padding is not a tuple.
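A standalone illustration of the decision rule; plain strings stand in for the module's SAME/VALID constants, which is an assumption made for the demo.

SAME, VALID = "SAME", "VALID"  # stand-ins for the module-level constants

def padding_to_conv_op_padding(padding):
    if not isinstance(padding, tuple):
        raise ValueError("padding should be a tuple.")
    return SAME if all(p == SAME for p in padding) else VALID

print(padding_to_conv_op_padding((SAME, SAME)))   # SAME  (no extra padding needed)
print(padding_to_conv_op_padding((SAME, VALID)))  # VALID (pad manually, then VALID conv)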
def renew_item(self, item, expiration): """Update the expiration time for ``item``. The item will remain checked out for ``expiration`` seconds beyond the current time. This queue instance must have already checked out ``item``, and this method can fail if ``item`` is already overdue. """ conn = self._conn() self._run_expiration(conn) expiration += time.time() script = conn.register_script(""" -- already expired? if redis.call("hget", KEYS[2], "i" .. ARGV[1]) ~= "w" .. ARGV[3] then return -1 end -- otherwise just update the expiration redis.call("zadd", KEYS[1], ARGV[2], ARGV[1]) return 0 """) result = script(keys=[self._key_expiration(), self._key_workers()], args=[item, expiration, self._get_worker_id(conn)]) if result == -1: raise LostLease(item) return
Update the expiration time for ``item``. The item will remain checked out for ``expiration`` seconds beyond the current time. This queue instance must have already checked out ``item``, and this method can fail if ``item`` is already overdue.
def _run_all(cmd, log_lvl=None, log_msg=None, exitcode=0): ''' Simple wrapper around cmd.run_all log_msg can contain {0} for stderr :return: True or stdout, False if retcode wasn't exitcode ''' res = __salt__['cmd.run_all'](cmd) if res['retcode'] == exitcode: if res['stdout']: return res['stdout'] else: return True if log_lvl is not None: log.log(LOG[log_lvl], log_msg, res['stderr']) return False
Simple wrapper around cmd.run_all log_msg can contain {0} for stderr :return: True or stdout, False if retcode wasn't exitcode
def _normalize_dir(string_): ''' Normalize the directory to make comparison possible ''' return os.path.normpath(salt.utils.stringutils.to_unicode(string_))
Normalize the directory to make comparison possible
def do_group(self): """ Do grouping on register """ group_id = self.config.group systems = {'machine_id': generate_machine_id()} self.group_systems(group_id, systems)
Do grouping on register
def personsAtHome(self, home=None): """ Return the list of known persons who are currently at home """ if not home: home = self.default_home home_data = self.homeByName(home) atHome = [] for p in home_data['persons']: #Only check known persons if 'pseudo' in p: if not p["out_of_sight"]: atHome.append(p['pseudo']) return atHome
Return the list of known persons who are currently at home
def safe_copyfile(src, dest): """safely copy src to dest using a temporary intermediate and then renaming to dest""" fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest)) shutil.copyfileobj(open(src, 'rb'), os.fdopen(fd, 'wb')) shutil.copystat(src, tmpname) os.rename(tmpname, dest)
safely copy src to dest using a temporary intermediate and then renaming to dest
def get_resource_query_session(self, proxy): """Gets a resource query session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceQuerySession) - ``a ResourceQuerySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_query()`` is ``true``.* """ if not self.supports_resource_query(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ResourceQuerySession(proxy=proxy, runtime=self._runtime)
Gets a resource query session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.ResourceQuerySession) - ``a ResourceQuerySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_query()`` is ``true``.*
def _raise_from_invalid_response(error): """Re-wrap and raise an ``InvalidResponse`` exception. :type error: :exc:`google.resumable_media.InvalidResponse` :param error: A caught exception from the ``google-resumable-media`` library. :raises: :class:`~google.cloud.exceptions.GoogleCloudError` corresponding to the failed status code """ response = error.response error_message = str(error) message = u"{method} {url}: {error}".format( method=response.request.method, url=response.request.url, error=error_message ) raise exceptions.from_http_status(response.status_code, message, response=response)
Re-wrap and raise an ``InvalidResponse`` exception. :type error: :exc:`google.resumable_media.InvalidResponse` :param error: A caught exception from the ``google-resumable-media`` library. :raises: :class:`~google.cloud.exceptions.GoogleCloudError` corresponding to the failed status code
def logsumexp(arr, axis=0): """Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 """ arr = np.rollaxis(arr, axis) # Use the max to normalize, as with the log this is what accumulates # the less errors vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out
Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107
def from_dict(data, ctx): """ Instantiate a new GuaranteedStopLossOrderLevelRestriction from a dict (generally from loading a JSON response). The data used to instantiate the GuaranteedStopLossOrderLevelRestriction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('volume') is not None: data['volume'] = ctx.convert_decimal_number( data.get('volume') ) if data.get('priceRange') is not None: data['priceRange'] = ctx.convert_decimal_number( data.get('priceRange') ) return GuaranteedStopLossOrderLevelRestriction(**data)
Instantiate a new GuaranteedStopLossOrderLevelRestriction from a dict (generally from loading a JSON response). The data used to instantiate the GuaranteedStopLossOrderLevelRestriction is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
def get_stories(self, story_type='', limit=30): """ Yields a list of stories from the passed page of HN. 'story_type' can be: \t'' = top stories (homepage) (default) \t'news2' = page 2 of top stories \t'newest' = most recent stories \t'best' = best stories 'limit' is the number of stories required from the given page. Defaults to 30. Cannot be more than 30. """ if limit is None or limit < 1 or limit > 30: # we need at least 30 items limit = 30 stories_found = 0 # self.more = story_type # while we still have more stories to find while stories_found < limit: # get current page soup soup = get_soup(page=story_type) all_rows = self._get_zipped_rows(soup) # get a list of stories on current page stories = self._build_story(all_rows) # move to next page # self.more = self._get_next_page(soup) for story in stories: yield story stories_found += 1 # if enough stories found, return if stories_found == limit: return
Yields a list of stories from the passed page of HN. 'story_type' can be: \t'' = top stories (homepage) (default) \t'news2' = page 2 of top stories \t'newest' = most recent stories \t'best' = best stories 'limit' is the number of stories required from the given page. Defaults to 30. Cannot be more than 30.
def copy_and_sum_families(family_source, family_target):
    """ This method iterates through the source family and copies its entries to the
    target family; if a key already exists in both families, the values are added """
    for every in family_source:
        if every not in family_target:
            family_target[every] = family_source[every]
        else:
            family_target[every] += family_source[every]
This method iterates through the source family and copies its entries to the target family; if a key already exists in both families, the values are added
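A quick worked example with plain dict counters (illustrative data); the function is repeated so the snippet runs on its own.

def copy_and_sum_families(family_source, family_target):
    for key in family_source:
        if key not in family_target:
            family_target[key] = family_source[key]
        else:
            family_target[key] += family_source[key]

source = {"a": 1, "b": 2}
target = {"b": 10, "c": 3}
copy_and_sum_families(source, target)
print(target)  # -> {'b': 12, 'c': 3, 'a': 1}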