Dataset columns: "Unnamed: 0" (int64, 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
379,000
def is_square_matrix(mat):
    mat = np.array(mat)
    if mat.ndim != 2:
        return False
    shape = mat.shape
    return shape[0] == shape[1]
Test if an array is a square matrix.
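For illustration, a self-contained sketch of the entry above with a few example inputs; the function body is copied verbatim and numpy is imported explicitly:

import numpy as np

def is_square_matrix(mat):
    mat = np.array(mat)
    if mat.ndim != 2:
        return False
    shape = mat.shape
    return shape[0] == shape[1]

print(is_square_matrix([[1, 2], [3, 4]]))        # True: 2x2
print(is_square_matrix([[1, 2, 3], [4, 5, 6]]))  # False: 2x3
print(is_square_matrix([1, 2, 3]))               # False: not 2-D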
379,001
def _login(session):
    resp = session.get(LOGIN_URL, params=_get_params(session.auth.locale))
    parsed = BeautifulSoup(resp.text, HTML_PARSER)
    csrf = parsed.find(CSRF_FIND_TAG, CSRF_FIND_ATTR).get(VALUE_ATTR)
    # NOTE: the original form-field names were lost in extraction; the keys
    # below are placeholders for the username, password, remember-me, CSRF
    # and locale fields of the UPS login form.
    resp = session.post(LOGIN_URL, {
        'username': session.auth.username,
        'password': session.auth.password,
        'remember_me': '',
        'csrf_token': csrf,
        'locale': session.auth.locale,
    })
    if resp.status_code == 403:
        raise UPSError()
    parsed = BeautifulSoup(resp.text, HTML_PARSER)
    error = parsed.find(ERROR_FIND_TAG, ERROR_FIND_ATTR)
    if error and error.string:
        raise UPSError(error.string.strip())
    _save_cookies(session.cookies, session.auth.cookie_path)
Login to UPS.
379,002
def view_sbo(self):
    sbo_url = self.sbo_url.replace("/slackbuilds/", "/repository/")
    br1, br2, fix_sp = "", "", " "
    if self.meta.use_colors in ["off", "OFF"]:
        br1 = "("
        br2 = ")"
        fix_sp = ""
    print("")
    self.msg.template(78)
    print("| {0}{1}SlackBuilds Repository{2}".format(" " * 28, self.grey, self.endc))
    self.msg.template(78)
    print("| {0} > {1} > {2}{3}{4}".format(slack_ver(),
                                           sbo_url.split("/")[-3].title(),
                                           self.cyan, self.name, self.endc))
    self.msg.template(78)
    print("| {0}Package url{1}: {2}".format(self.green, self.endc, sbo_url))
    self.msg.template(78)
    print("| {0}Description: {1}{2}".format(self.green, self.endc, self.sbo_desc))
    print("| {0}SlackBuild: {1}{2}".format(self.green, self.endc,
                                           self.sbo_dwn.split("/")[-1]))
    print("| {0}Sources: {1}{2}".format(
        self.green, self.endc,
        (", ".join([src.split("/")[-1] for src in self.source_dwn]))))
    print("| {0}Requirements: {1}{2}".format(self.yellow, self.endc,
                                             ", ".join(self.sbo_req)))
    self.msg.template(78)
    print("| {0}R{1}{2}EADME View the README file".format(
        self.red, self.endc, br2))
    print("| {0}S{1}{2}lackBuild View the .SlackBuild "
          "file".format(self.red, self.endc, br2))
    print("| In{0}{1}f{2}{3}o{4} View the .info "
          "file".format(br1, self.red, self.endc, br2, fix_sp))
    if "doinst.sh" in self.sbo_files.split():
        print("| D{0}{1}o{2}{3}inst.sh{4} View the doinst.sh "
              "file".format(br1, self.red, self.endc, br2, fix_sp))
    print("| {0}D{1}{2}ownload Download this package".format(
        self.red, self.endc, br2))
    print("| {0}B{1}{2}uild Download and build".format(
        self.red, self.endc, br2))
    print("| {0}I{1}{2}nstall Download/Build/Install".format(
        self.red, self.endc, br2))
    print("| {0}C{1}{2}lear Clear screen".format(self.red, self.endc, br2))
    print("| {0}Q{1}{2}uit Quit".format(self.red, self.endc, br2))
    self.msg.template(78)
View slackbuild.org
379,003
def match(self, path):
    match = self._re.search(path)
    if match is None:
        return None
    kwargs_indexes = match.re.groupindex.values()
    args_indexes = [i for i in range(1, match.re.groups + 1)
                    if i not in kwargs_indexes]
    args = [match.group(i) for i in args_indexes]
    kwargs = {}
    for name, index in match.re.groupindex.items():
        kwargs[name] = match.group(index)
    return self._callback, args, kwargs
Return route handler with arguments if path matches this route.

Arguments:
    path (str): Request path

Returns:
    tuple or None: A tuple of three items:

        1. Route handler (callable)
        2. Positional arguments (list)
        3. Keyword arguments (dict)

    ``None`` if the route does not match the path.
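For illustration, a standalone sketch of the args/kwargs split that match() performs, using a hypothetical route pattern (positional groups become args, named groups become kwargs):

import re

pattern = re.compile(r"^/users/(\d+)/posts/(?P<slug>[\w-]+)$")
m = pattern.search("/users/42/posts/hello-world")
kwargs_indexes = m.re.groupindex.values()
args_indexes = [i for i in range(1, m.re.groups + 1) if i not in kwargs_indexes]
args = [m.group(i) for i in args_indexes]
kwargs = {name: m.group(i) for name, i in m.re.groupindex.items()}
print(args, kwargs)  # ['42'] {'slug': 'hello-world'}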
379,004
def del_application(self, application, sync=True):
    LOGGER.debug("Team.del_application")
    if not sync:
        self.app_2_rm.append(application)
    else:
        if application.id is None:
            application.sync()
        if self.id is not None and application.id is not None:
            # NOTE: the request parameter names, operation path and log
            # messages were lost in extraction; the strings below are
            # placeholders following the surrounding client's conventions.
            params = {'id': self.id, 'applicationID': application.id}
            args = {'http_operation': 'GET',
                    'operation_path': 'update/applications/delete',
                    'parameters': params}
            response = TeamService.requester.call(args)
            if response.rc != 0:
                LOGGER.warning(
                    'Team.del_application - Problem while updating team ' + self.name +
                    '. Reason: ' + str(response.response_content) + ' - ' +
                    str(response.error_message) + " (" + str(response.rc) + ")"
                )
            else:
                self.app_ids.remove(application.id)
                application.sync()
        else:
            LOGGER.warning(
                'Team.del_application - Problem while updating team ' + self.name +
                '. Reason: application ' + application.name + ' id is None.'
            )
delete application from this team :param application: the application to be deleted from this team :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the application object on list to be removed on next save(). :return:
379,005
def dataframe(self):
    # NOTE: the original dictionary keys were lost in extraction; they are
    # restored here to mirror the property names, which is the convention
    # this library uses for its DataFrame column names.
    fields_to_include = {
        'abbreviation': self.abbreviation,
        'assist_percentage': self.assist_percentage,
        'assists': self.assists,
        'away_losses': self.away_losses,
        'away_wins': self.away_wins,
        'block_percentage': self.block_percentage,
        'blocks': self.blocks,
        'conference': self.conference,
        'conference_losses': self.conference_losses,
        'conference_wins': self.conference_wins,
        'defensive_rebounds': self.defensive_rebounds,
        'effective_field_goal_percentage': self.effective_field_goal_percentage,
        'field_goal_attempts': self.field_goal_attempts,
        'field_goal_percentage': self.field_goal_percentage,
        'field_goals': self.field_goals,
        'free_throw_attempt_rate': self.free_throw_attempt_rate,
        'free_throw_attempts': self.free_throw_attempts,
        'free_throw_percentage': self.free_throw_percentage,
        'free_throws': self.free_throws,
        'free_throws_per_field_goal_attempt': self.free_throws_per_field_goal_attempt,
        'games_played': self.games_played,
        'home_losses': self.home_losses,
        'home_wins': self.home_wins,
        'losses': self.losses,
        'minutes_played': self.minutes_played,
        'name': self.name,
        'net_rating': self.net_rating,
        'offensive_rating': self.offensive_rating,
        'offensive_rebound_percentage': self.offensive_rebound_percentage,
        'offensive_rebounds': self.offensive_rebounds,
        'opp_assist_percentage': self.opp_assist_percentage,
        'opp_assists': self.opp_assists,
        'opp_block_percentage': self.opp_block_percentage,
        'opp_blocks': self.opp_blocks,
        'opp_defensive_rebounds': self.opp_defensive_rebounds,
        'opp_effective_field_goal_percentage': self.opp_effective_field_goal_percentage,
        'opp_field_goal_attempts': self.opp_field_goal_attempts,
        'opp_field_goal_percentage': self.opp_field_goal_percentage,
        'opp_field_goals': self.opp_field_goals,
        'opp_free_throw_attempt_rate': self.opp_free_throw_attempt_rate,
        'opp_free_throw_attempts': self.opp_free_throw_attempts,
        'opp_free_throw_percentage': self.opp_free_throw_percentage,
        'opp_free_throws': self.opp_free_throws,
        'opp_free_throws_per_field_goal_attempt': self.opp_free_throws_per_field_goal_attempt,
        'opp_offensive_rating': self.opp_offensive_rating,
        'opp_offensive_rebound_percentage': self.opp_offensive_rebound_percentage,
        'opp_offensive_rebounds': self.opp_offensive_rebounds,
        'opp_personal_fouls': self.opp_personal_fouls,
        'opp_points': self.opp_points,
        'opp_steal_percentage': self.opp_steal_percentage,
        'opp_steals': self.opp_steals,
        'opp_three_point_attempt_rate': self.opp_three_point_attempt_rate,
        'opp_three_point_field_goal_attempts': self.opp_three_point_field_goal_attempts,
        'opp_three_point_field_goal_percentage': self.opp_three_point_field_goal_percentage,
        'opp_three_point_field_goals': self.opp_three_point_field_goals,
        'opp_two_point_field_goal_attempts': self.opp_two_point_field_goal_attempts,
        'opp_two_point_field_goal_percentage': self.opp_two_point_field_goal_percentage,
        'opp_two_point_field_goals': self.opp_two_point_field_goals,
        'opp_total_rebound_percentage': self.opp_total_rebound_percentage,
        'opp_total_rebounds': self.opp_total_rebounds,
        'opp_true_shooting_percentage': self.opp_true_shooting_percentage,
        'opp_turnover_percentage': self.opp_turnover_percentage,
        'opp_turnovers': self.opp_turnovers,
        'pace': self.pace,
        'personal_fouls': self.personal_fouls,
        'points': self.points,
        'simple_rating_system': self.simple_rating_system,
        'steal_percentage': self.steal_percentage,
        'steals': self.steals,
        'strength_of_schedule': self.strength_of_schedule,
        'three_point_attempt_rate': self.three_point_attempt_rate,
        'three_point_field_goal_attempts': self.three_point_field_goal_attempts,
        'three_point_field_goal_percentage': self.three_point_field_goal_percentage,
        'three_point_field_goals': self.three_point_field_goals,
        'two_point_field_goal_attempts': self.two_point_field_goal_attempts,
        'two_point_field_goal_percentage': self.two_point_field_goal_percentage,
        'two_point_field_goals': self.two_point_field_goals,
        'total_rebound_percentage': self.total_rebound_percentage,
        'total_rebounds': self.total_rebounds,
        'true_shooting_percentage': self.true_shooting_percentage,
        'turnover_percentage': self.turnover_percentage,
        'turnovers': self.turnovers,
        'win_percentage': self.win_percentage,
        'wins': self.wins,
    }
    return pd.DataFrame([fields_to_include], index=[self._abbreviation])
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'PURDUE'.
379,006
def DictReader(file_obj, columns=None):
    footer = _read_footer(file_obj)
    keys = columns if columns else [s.name for s in footer.schema if s.type]
    for row in reader(file_obj, columns):
        yield OrderedDict(zip(keys, row))
Reader for a parquet file object. This function is a generator returning an OrderedDict for each row of data in the parquet file. Nested values will be flattened into the top-level dict and can be referenced with '.' notation (e.g. 'foo' -> 'bar' is referenced as 'foo.bar') :param file_obj: the file containing parquet data :param columns: the columns to include. If None (default), all columns are included. Nested values are referenced with "." notation
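A hedged usage sketch, assuming this generator is the DictReader exposed by the pure-Python parquet package; the file path and column names are placeholders:

import parquet

with open('example.parquet', 'rb') as f:
    # yields an OrderedDict per row, restricted to the requested columns
    for row in parquet.DictReader(f, columns=['id', 'name']):
        print(row['id'], row['name'])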
379,007
def restore_event(self, requestId):
    with self.__requests:
        if requestId not in self.__requests:
            self.__requests[requestId] = RequestEvent(requestId)
            return True
        return False
restore an event based on the requestId. For example, if the user app had to shut down with pending requests, the user can rebuild the Events they were waiting for based on the requestId(s).
379,008
def on_persist_completed(self, block):
    if len(self._events_to_write):
        addr_db = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR)
        block_db = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK)
        contract_db = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT)
        block_write_batch = block_db.write_batch()
        contract_write_batch = contract_db.write_batch()
        block_count = 0
        # NOTE: the byte-order literals and the 4-byte zero initializers were
        # lost in extraction; 'little' and b'\x00\x00\x00\x00' are assumed.
        block_bytes = self._events_to_write[0].block_number.to_bytes(4, 'little')
        for evt in self._events_to_write:
            write_both = True
            hash_data = evt.ToByteArray()
            bytes_to = bytes(evt.addr_to.Data)
            bytes_from = bytes(evt.addr_from.Data)
            if bytes_to == bytes_from:
                write_both = False
            total_bytes_to = addr_db.get(bytes_to + NotificationPrefix.PREFIX_COUNT)
            total_bytes_from = addr_db.get(bytes_from + NotificationPrefix.PREFIX_COUNT)
            if not total_bytes_to:
                total_bytes_to = b'\x00\x00\x00\x00'
            if not total_bytes_from:
                total_bytes_from = b'\x00\x00\x00\x00'
            addr_to_key = bytes_to + total_bytes_to
            addr_from_key = bytes_from + total_bytes_from
            with addr_db.write_batch() as b:
                b.put(addr_to_key, hash_data)
                if write_both:
                    b.put(addr_from_key, hash_data)
                total_bytes_to = int.from_bytes(total_bytes_to, 'little') + 1
                total_bytes_from = int.from_bytes(total_bytes_from, 'little') + 1
                new_bytes_to = total_bytes_to.to_bytes(4, 'little')
                new_bytes_from = total_bytes_from.to_bytes(4, 'little')
                b.put(bytes_to + NotificationPrefix.PREFIX_COUNT, new_bytes_to)
                if write_both:
                    b.put(bytes_from + NotificationPrefix.PREFIX_COUNT, new_bytes_from)
            per_block_key = block_bytes + block_count.to_bytes(4, 'little')
            block_write_batch.put(per_block_key, hash_data)
            block_count += 1
            contract_bytes = bytes(evt.contract_hash.Data)
            count_for_contract = contract_db.get(contract_bytes + NotificationPrefix.PREFIX_COUNT)
            if not count_for_contract:
                count_for_contract = b'\x00\x00\x00\x00'
            contract_event_key = contract_bytes + count_for_contract
            contract_count_int = int.from_bytes(count_for_contract, 'little') + 1
            new_contract_count = contract_count_int.to_bytes(4, 'little')
            contract_write_batch.put(contract_bytes + NotificationPrefix.PREFIX_COUNT, new_contract_count)
            contract_write_batch.put(contract_event_key, hash_data)
        block_write_batch.write()
        contract_write_batch.write()
        self._events_to_write = []
    if len(self._new_contracts_to_write):
        token_db = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN)
        token_write_batch = token_db.write_batch()
        for token_event in self._new_contracts_to_write:
            try:
                hash_data = token_event.ToByteArray()
                hash_key = token_event.contract.Code.ScriptHash().ToBytes()
                token_write_batch.put(hash_key, hash_data)
            except Exception as e:
                logger.debug(f"Failed to write new contract, reason: {e}")
        token_write_batch.write()
        self._new_contracts_to_write = []
Called when a block has been persisted to disk. Used as a hook to persist notification data. Args: block (neo.Core.Block): the currently persisting block
379,009
def create_json_archive(self):
    archive_data = {"packets": self.recv_msgs,
                    "dataset": self.dataset_name,
                    "num_packets": len(self.recv_msgs),
                    "created": rnow()}
    self.write_to_file(archive_data, self.archive_file)
create_json_archive
379,010
def smart_convert(original, colorkey, pixelalpha):
    tile_size = original.get_size()
    threshold = 127
    try:
        # count the opaque pixels in the surface
        px = pygame.mask.from_surface(original, threshold).count()
    except Exception:
        return original.convert_alpha()
    if px == tile_size[0] * tile_size[1]:
        # fully opaque: plain convert is fastest
        tile = original.convert()
    elif colorkey:
        tile = original.convert()
        tile.set_colorkey(colorkey, pygame.RLEACCEL)
    elif pixelalpha:
        tile = original.convert_alpha()
    else:
        tile = original.convert()
    return tile
This function runs several tests on a surface to determine the optimal flags and pixel format for each tile surface. This is done for the best rendering speeds and removes the need to convert() the images on your own.
379,011
def run(self, executable: Executable,
        memory_map: Dict[str, List[Union[int, float]]] = None) -> np.ndarray:
    self.qam.load(executable)
    if memory_map:
        for region_name, values_list in memory_map.items():
            for offset, value in enumerate(values_list):
                self.qam.write_memory(region_name=region_name, offset=offset, value=value)
    # the 'ro' region name is recovered from the docstring's ro-register
    return self.qam.run() \
        .wait() \
        .read_memory(region_name='ro')
Run a quil executable. If the executable contains declared parameters, then a memory map must be provided, which defines the runtime values of these parameters. :param executable: The program to run. You are responsible for compiling this first. :param memory_map: The mapping of declared parameters to their values. The values are a list of floats or integers. :return: A numpy array of shape (trials, len(ro-register)) that contains 0s and 1s.
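A hedged usage sketch against the pyQuil 2.x-era API this method belongs to; the gate choice, qubit, shot count and parameter value are illustrative:

from pyquil import get_qc, Program
from pyquil.gates import RX, MEASURE

qc = get_qc('1q-qvm')
p = Program()
theta = p.declare('theta', 'REAL')       # declared parameter bound at run time
ro = p.declare('ro', 'BIT', 1)           # readout register
p += RX(theta, 0)
p += MEASURE(0, ro[0])
p.wrap_in_numshots_loop(10)
executable = qc.compile(p)
# memory_map binds the declared 'theta' region, as the docstring describes
bitstrings = qc.run(executable, memory_map={'theta': [1.57]})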
379,012
def edit(cls, record, parent=None, uifile='', commit=True):
    # NOTE: several string literals in this method were lost in extraction;
    # the window title, schema property name and result key below are
    # plausible placeholders, not the original values. The original also
    # referenced self.window() inside this classmethod, which cannot work;
    # widget.window() is used instead.
    dlg = QDialog(parent)
    dlg.setWindowTitle('Edit %s' % record.schema().name())
    cls = record.schema().property('editorClass', cls)
    widget = cls(dlg)
    if uifile:
        widget.setUiFile(uifile)
    widget.setRecord(record)
    widget.layout().setContentsMargins(0, 0, 0, 0)
    opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel
    btns = QDialogButtonBox(opts, Qt.Horizontal, dlg)
    layout = QVBoxLayout()
    layout.addWidget(widget)
    layout.addWidget(btns)
    dlg.setLayout(layout)
    dlg.adjustSize()
    btns.rejected.connect(dlg.reject)
    widget.saved.connect(dlg.accept)
    if dlg.exec_():
        if commit:
            result = widget.record().commit()
            if 'errored' in result:
                QMessageBox.information(widget.window(), 'Error', result['errored'])
                return False
        return True
    return False
Prompts the user to edit the given record. :param record | <orb.Table> parent | <QWidget> :return <bool> | accepted
379,013
def __set_rouge_dir(self, home_dir=None):
    if not home_dir:
        self._home_dir = self.__get_rouge_home_dir_from_settings()
    else:
        self._home_dir = home_dir
        self.save_home_dir()
    # path literals recovered from the docstring below
    self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
    self.data_dir = os.path.join(self._home_dir, 'data')
    if not os.path.exists(self._bin_path):
        raise Exception(
            "ROUGE binary not found at {}. Please set the "
            "correct path by running pyrouge_set_rouge_path "
            "/path/to/rouge/home.".format(self._bin_path))
Verify presence of ROUGE-1.5.5.pl and the data folder, and set those paths.
379,014
def thumbnail(self, size):
    if size in self.thumbnail_sizes:
        return self.thumbnails.get(str(size))
    else:
        # original error text lost in extraction; a generic message is used
        raise ValueError('Invalid thumbnail size: {}'.format(size))
Get the thumbnail filename for a given size
379,015
def getContactItems(self, person):
    return person.store.query(
        EmailAddress,
        EmailAddress.person == person)
Return all L{EmailAddress} instances associated with the given person. @type person: L{Person}
379,016
def setColor(self, typeID, color):
    self._connection._beginMessage(
        tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_COLOR, typeID,
        1 + 1 + 1 + 1 + 1)
    self._connection._string += struct.pack(
        "!BBBBB", tc.TYPE_COLOR,
        int(color[0]), int(color[1]), int(color[2]), int(color[3]))
    self._connection._sendExact()
setColor(string, (integer, integer, integer, integer)) -> None Sets the color of this type.
379,017
def create_db_user(username, password=None, flags=None):
    # NOTE: the command and user literals were lost in extraction; the
    # 'createuser'/'postgres' pair below is a plausible reconstruction for a
    # PostgreSQL deployment helper.
    flags = flags or u''
    sudo(u'createuser %s %s' % (flags, username), user=u'postgres')
    if password:
        change_db_user_password(username, password)
Create a database user.
379,018
def search(
        self, query, accept_language=None, pragma=None, user_agent=None,
        client_id=None, client_ip=None, location=None, answer_count=None,
        country_code=None, count=None, freshness=None, market="en-us",
        offset=None, promote=None, response_filter=None, safe_search=None,
        set_lang=None, text_decorations=None, text_format=None,
        custom_headers=None, raw=False, **operation_config):
    # NOTE: the string literals in this generated SDK method were lost in
    # extraction; the query/header names and serializer type codes below are
    # reconstructed from the Bing Web Search API conventions documented in
    # the docstring, and may differ from the exact originals.
    url = self.search.metadata['url']
    query_parameters = {}
    if answer_count is not None:
        query_parameters['answerCount'] = self._serialize.query("answer_count", answer_count, 'int')
    if country_code is not None:
        query_parameters['cc'] = self._serialize.query("country_code", country_code, 'str')
    if count is not None:
        query_parameters['count'] = self._serialize.query("count", count, 'int')
    if freshness is not None:
        query_parameters['freshness'] = self._serialize.query("freshness", freshness, 'str')
    if market is not None:
        query_parameters['mkt'] = self._serialize.query("market", market, 'str')
    if offset is not None:
        query_parameters['offset'] = self._serialize.query("offset", offset, 'int')
    if promote is not None:
        query_parameters['promote'] = self._serialize.query("promote", promote, '[str]', div=',')
    query_parameters['q'] = self._serialize.query("query", query, 'str')
    if response_filter is not None:
        query_parameters['responseFilter'] = self._serialize.query("response_filter", response_filter, '[str]', div=',')
    if safe_search is not None:
        query_parameters['safeSearch'] = self._serialize.query("safe_search", safe_search, 'str')
    if set_lang is not None:
        query_parameters['setLang'] = self._serialize.query("set_lang", set_lang, 'str')
    if text_decorations is not None:
        query_parameters['textDecorations'] = self._serialize.query("text_decorations", text_decorations, 'bool')
    if text_format is not None:
        query_parameters['textFormat'] = self._serialize.query("text_format", text_format, 'str')
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    header_parameters['X-BingApis-SDK'] = self._serialize.header("self.x_bing_apis_sdk", self.x_bing_apis_sdk, 'str')
    if accept_language is not None:
        header_parameters['Accept-Language'] = self._serialize.header("accept_language", accept_language, 'str')
    if pragma is not None:
        header_parameters['Pragma'] = self._serialize.header("pragma", pragma, 'str')
    if user_agent is not None:
        header_parameters['User-Agent'] = self._serialize.header("user_agent", user_agent, 'str')
    if client_id is not None:
        header_parameters['X-MSEdge-ClientID'] = self._serialize.header("client_id", client_id, 'str')
    if client_ip is not None:
        header_parameters['X-MSEdge-ClientIP'] = self._serialize.header("client_ip", client_ip, 'str')
    if location is not None:
        header_parameters['X-Search-Location'] = self._serialize.header("location", location, 'str')
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SearchResponse', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
The Web Search API lets you send a search query to Bing and get back search results that include links to webpages, images, and more. :param query: The user's search query term. The term may not be empty. The term may contain Bing Advanced Operators. For example, to limit results to a specific domain, use the site: operator. :type query: str :param accept_language: A comma-delimited list of one or more languages to use for user interface strings. The list is in decreasing order of preference. For additional information, including expected format, see [RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). This header and the setLang query parameter are mutually exclusive; do not specify both. If you set this header, you must also specify the cc query parameter. Bing will use the first supported language it finds from the list, and combine that language with the cc parameter value to determine the market to return results for. If the list does not include a supported language, Bing will find the closest language and market that supports the request, and may use an aggregated or default market for the results instead of a specified one. You should use this header and the cc query parameter only if you specify multiple languages; otherwise, you should use the mkt and setLang query parameters. A user interface string is a string that's used as a label in a user interface. There are very few user interface strings in the JSON response objects. Any links in the response objects to Bing.com properties will apply the specified language. :type accept_language: str :param pragma: By default, Bing returns cached content, if available. To prevent Bing from returning cached content, set the Pragma header to no-cache (for example, Pragma: no-cache). :type pragma: str :param user_agent: The user agent originating the request. Bing uses the user agent to provide mobile users with an optimized experience. Although optional, you are strongly encouraged to always specify this header. The user-agent should be the same string that any commonly used browser would send. For information about user agents, see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). :type user_agent: str :param client_id: Bing uses this header to provide users with consistent behavior across Bing API calls. Bing often flights new features and improvements, and it uses the client ID as a key for assigning traffic on different flights. If you do not use the same client ID for a user across multiple requests, then Bing may assign the user to multiple conflicting flights. Being assigned to multiple conflicting flights can lead to an inconsistent user experience. For example, if the second request has a different flight assignment than the first, the experience may be unexpected. Also, Bing can use the client ID to tailor web results to that client ID’s search history, providing a richer experience for the user. Bing also uses this header to help improve result rankings by analyzing the activity generated by a client ID. The relevance improvements help with better quality of results delivered by Bing APIs and in turn enables higher click-through rates for the API consumer. IMPORTANT: Although optional, you should consider this header required. Persisting the client ID across multiple requests for the same end user and device combination enables 1) the API consumer to receive a consistent user experience, and 2) higher click-through rates via better quality of results from the Bing APIs. 
Each user that uses your application on the device must have a unique, Bing generated client ID. If you do not include this header in the request, Bing generates an ID and returns it in the X-MSEdge-ClientID response header. The only time that you should NOT include this header in a request is the first time the user uses your app on that device. Use the client ID for each Bing API request that your app makes for this user on the device. Persist the client ID. To persist the ID in a browser app, use a persistent HTTP cookie to ensure the ID is used across all sessions. Do not use a session cookie. For other apps such as mobile apps, use the device's persistent storage to persist the ID. The next time the user uses your app on that device, get the client ID that you persisted. Bing responses may or may not include this header. If the response includes this header, capture the client ID and use it for all subsequent Bing requests for the user on that device. If you include the X-MSEdge-ClientID, you must not include cookies in the request. :type client_id: str :param client_ip: The IPv4 or IPv6 address of the client device. The IP address is used to discover the user's location. Bing uses the location information to determine safe search behavior. Although optional, you are encouraged to always specify this header and the X-Search-Location header. Do not obfuscate the address (for example, by changing the last octet to 0). Obfuscating the address results in the location not being anywhere near the device's actual location, which may result in Bing serving erroneous results. :type client_ip: str :param location: A semicolon-delimited list of key/value pairs that describe the client's geographical location. Bing uses the location information to determine safe search behavior and to return relevant local content. Specify the key/value pair as <key>:<value>. The following are the keys that you use to specify the user's location. lat (required): The latitude of the client's location, in degrees. The latitude must be greater than or equal to -90.0 and less than or equal to +90.0. Negative values indicate southern latitudes and positive values indicate northern latitudes. long (required): The longitude of the client's location, in degrees. The longitude must be greater than or equal to -180.0 and less than or equal to +180.0. Negative values indicate western longitudes and positive values indicate eastern longitudes. re (required): The radius, in meters, which specifies the horizontal accuracy of the coordinates. Pass the value returned by the device's location service. Typical values might be 22m for GPS/Wi-Fi, 380m for cell tower triangulation, and 18,000m for reverse IP lookup. ts (optional): The UTC UNIX timestamp of when the client was at the location. (The UNIX timestamp is the number of seconds since January 1, 1970.) head (optional): The client's relative heading or direction of travel. Specify the direction of travel as degrees from 0 through 360, counting clockwise relative to true north. Specify this key only if the sp key is nonzero. sp (optional): The horizontal velocity (speed), in meters per second, that the client device is traveling. alt (optional): The altitude of the client device, in meters. are (optional): The radius, in meters, that specifies the vertical accuracy of the coordinates. Specify this key only if you specify the alt key. Although many of the keys are optional, the more information that you provide, the more accurate the location results are. 
Although optional, you are encouraged to always specify the user's geographical location. Providing the location is especially important if the client's IP address does not accurately reflect the user's physical location (for example, if the client uses VPN). For optimal results, you should include this header and the X-MSEdge-ClientIP header, but at a minimum, you should include this header. :type location: str :param answer_count: The number of answers that you want the response to include. The answers that Bing returns are based on ranking. For example, if Bing returns webpages, images, videos, and relatedSearches for a request and you set this parameter to two (2), the response includes webpages and images.If you included the responseFilter query parameter in the same request and set it to webpages and news, the response would include only webpages. :type answer_count: int :param country_code: A 2-character country code of the country where the results come from. This API supports only the United States market. If you specify this query parameter, it must be set to us. If you set this parameter, you must also specify the Accept-Language header. Bing uses the first supported language it finds from the languages list, and combine that language with the country code that you specify to determine the market to return results for. If the languages list does not include a supported language, Bing finds the closest language and market that supports the request, or it may use an aggregated or default market for the results instead of a specified one. You should use this query parameter and the Accept-Language query parameter only if you specify multiple languages; otherwise, you should use the mkt and setLang query parameters. This parameter and the mkt query parameter are mutually exclusive—do not specify both. :type country_code: str :param count: The number of search results to return in the response. The default is 10 and the maximum value is 50. The actual number delivered may be less than requested.Use this parameter along with the offset parameter to page results.For example, if your user interface displays 10 search results per page, set count to 10 and offset to 0 to get the first page of results. For each subsequent page, increment offset by 10 (for example, 0, 10, 20). It is possible for multiple pages to include some overlap in results. :type count: int :param freshness: Filter search results by the following age values: Day—Return webpages that Bing discovered within the last 24 hours. Week—Return webpages that Bing discovered within the last 7 days. Month—Return webpages that discovered within the last 30 days. This filter applies only to webpage results and not to the other results such as news and images. Possible values include: 'Day', 'Week', 'Month' :type freshness: str or ~azure.cognitiveservices.search.websearch.models.Freshness :param market: The market where the results come from. Typically, mkt is the country where the user is making the request from. However, it could be a different country if the user is not located in a country where Bing delivers results. The market must be in the form <language code>-<country code>. For example, en-US. The string is case insensitive. If known, you are encouraged to always specify the market. Specifying the market helps Bing route the request and return an appropriate and optimal response. 
If you specify a market that is not listed in Market Codes, Bing uses a best fit market code based on an internal mapping that is subject to change. This parameter and the cc query parameter are mutually exclusive—do not specify both. :type market: str :param offset: The zero-based offset that indicates the number of search results to skip before returning results. The default is 0. The offset should be less than (totalEstimatedMatches - count). Use this parameter along with the count parameter to page results. For example, if your user interface displays 10 search results per page, set count to 10 and offset to 0 to get the first page of results. For each subsequent page, increment offset by 10 (for example, 0, 10, 20). it is possible for multiple pages to include some overlap in results. :type offset: int :param promote: A comma-delimited list of answers that you want the response to include regardless of their ranking. For example, if you set answerCount) to two (2) so Bing returns the top two ranked answers, but you also want the response to include news, you'd set promote to news. If the top ranked answers are webpages, images, videos, and relatedSearches, the response includes webpages and images because news is not a ranked answer. But if you set promote to video, Bing would promote the video answer into the response and return webpages, images, and videos. The answers that you want to promote do not count against the answerCount limit. For example, if the ranked answers are news, images, and videos, and you set answerCount to 1 and promote to news, the response contains news and images. Or, if the ranked answers are videos, images, and news, the response contains videos and news. Possible values are Computation, Images, News, RelatedSearches, SpellSuggestions, TimeZone, Videos, Webpages. Use only if you specify answerCount. :type promote: list[str or ~azure.cognitiveservices.search.websearch.models.AnswerType] :param response_filter: A comma-delimited list of answers to include in the response. If you do not specify this parameter, the response includes all search answers for which there's relevant data. Possible filter values are Computation, Images, News, RelatedSearches, SpellSuggestions, TimeZone, Videos, Webpages. Although you may use this filter to get a single answer, you should instead use the answer-specific endpoint in order to get richer results. For example, to receive only images, send the request to one of the Image Search API endpoints. The RelatedSearches and SpellSuggestions answers do not support a separate endpoint like the Image Search API does (only the Web Search API returns them). To include answers that would otherwise be excluded because of ranking, see the promote query parameter. :type response_filter: list[str or ~azure.cognitiveservices.search.websearch.models.AnswerType] :param safe_search: A filter used to filter adult content. Off: Return webpages with adult text, images, or videos. Moderate: Return webpages with adult text, but not adult images or videos. Strict: Do not return webpages with adult text, images, or videos. The default is Moderate. If the request comes from a market that Bing's adult policy requires that safeSearch is set to Strict, Bing ignores the safeSearch value and uses Strict. If you use the site: query operator, there is the chance that the response may contain adult content regardless of what the safeSearch query parameter is set to. 
Use site: only if you are aware of the content on the site and your scenario supports the possibility of adult content. Possible values include: 'Off', 'Moderate', 'Strict' :type safe_search: str or ~azure.cognitiveservices.search.websearch.models.SafeSearch :param set_lang: The language to use for user interface strings. Specify the language using the ISO 639-1 2-letter language code. For example, the language code for English is EN. The default is EN (English). Although optional, you should always specify the language. Typically, you set setLang to the same language specified by mkt unless the user wants the user interface strings displayed in a different language. This parameter and the Accept-Language header are mutually exclusive; do not specify both. A user interface string is a string that's used as a label in a user interface. There are few user interface strings in the JSON response objects. Also, any links to Bing.com properties in the response objects apply the specified language. :type set_lang: str :param text_decorations: A Boolean value that determines whether display strings should contain decoration markers such as hit highlighting characters. If true, the strings may include markers. The default is false. To specify whether to use Unicode characters or HTML tags as the markers, see the textFormat query parameter. :type text_decorations: bool :param text_format: The type of markers to use for text decorations (see the textDecorations query parameter). Possible values are Raw—Use Unicode characters to mark content that needs special formatting. The Unicode characters are in the range E000 through E019. For example, Bing uses E000 and E001 to mark the beginning and end of query terms for hit highlighting. HTML—Use HTML tags to mark content that needs special formatting. For example, use <b> tags to highlight query terms in display strings. The default is Raw. For display strings that contain escapable HTML characters such as <, >, and &, if textFormat is set to HTML, Bing escapes the characters as appropriate (for example, < is escaped to &lt;). Possible values include: 'Raw', 'Html' :type text_format: str or ~azure.cognitiveservices.search.websearch.models.TextFormat :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: SearchResponse or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.search.websearch.models.SearchResponse or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.search.websearch.models.ErrorResponseException>`
379,019
def _encrypt_message(self, msg, nonce, timestamp=None):
    # NOTE: the XML reply template literal was lost in extraction; this is
    # the standard WeChat encrypted-reply envelope matching the format()
    # keyword arguments below.
    xml = """<xml>
<Encrypt><![CDATA[{encrypt}]]></Encrypt>
<MsgSignature><![CDATA[{signature}]]></MsgSignature>
<TimeStamp>{timestamp}</TimeStamp>
<Nonce><![CDATA[{nonce}]]></Nonce>
</xml>"""
    nonce = to_binary(nonce)
    timestamp = to_binary(timestamp) or to_binary(int(time.time()))
    encrypt = self.__pc.encrypt(to_text(msg), self.__id)
    signature = get_sha1_signature(self.__token, timestamp, nonce, encrypt)
    return to_text(xml.format(
        encrypt=to_text(encrypt),
        signature=to_text(signature),
        timestamp=to_text(timestamp),
        nonce=to_text(nonce)
    ))
Encrypt and package a message that the official account sends in reply to a user. :param msg: the message to send to the user, an XML-format string :param nonce: random string; may be self-generated or taken from the URL parameter nonce :param timestamp: timestamp; may be self-generated or taken from the URL parameter timestamp; if None, the current time is used :return: the encrypted ciphertext ready to be sent to the user, an XML-format string containing msg_signature, timestamp, nonce and encrypt
379,020
def getBurstingColumnsStats(self):
    traceData = self.tm.mmGetTraceUnpredictedActiveColumns().data
    resetData = self.tm.mmGetTraceResets().data
    countTrace = []
    for x in xrange(len(traceData)):
        if not resetData[x]:
            countTrace.append(len(traceData[x]))
    mean = numpy.mean(countTrace)
    stdDev = numpy.std(countTrace)
    maximum = max(countTrace)
    return mean, stdDev, maximum
Gets statistics on the Temporal Memory's bursting columns. Used as a metric of Temporal Memory's learning performance. :return: mean, standard deviation, and max of Temporal Memory's bursting columns over time
379,021
def _method_response_handler(self, response: Dict[str, Any]):
    code = response.get("CODE")
    if code in (200, 300):
        self._result_handler(response)
    else:
        asyncio.ensure_future(self._gen_result_handler(response))
Handle status codes in the 200-399 range and set the result for the corresponding response. Parameters: (response): - the response data as a Python dict Return: (bool): - strictly speaking, returns True as long as there is no error
379,022
def _brute_force_install_pip(self):
    if os.path.exists(self.pip_installer_fname):
        logger.debug("Using pip installer from %r", self.pip_installer_fname)
    else:
        logger.debug(
            "Installer for pip not found in %r, downloading it",
            self.pip_installer_fname)
        self._download_pip_installer()
    logger.debug("Installing PIP manually in the virtualenv")
    python_exe = os.path.join(self.env_bin_path, "python")
    # NOTE: the final argument to the installer was lost in extraction;
    # '-I' (isolated mode) is a plausible reconstruction.
    helpers.logged_exec([python_exe, self.pip_installer_fname, '-I'])
    self.pip_installed = True
A brute force install of pip itself.
379,023
def find_one(self, cls, id):
    one = self._find(cls, {"_id": id})
    if not one:
        return None
    return one[0]
Required functionality.
379,024
def from_packed(cls, packed):
    packed = np.asarray(packed)
    check_ndim(packed, 2)
    check_dtype(packed, 'u1')  # uint8, per the docstring
    packed = memoryview_safe(packed)
    data = genotype_array_unpack_diploid(packed)
    return cls(data)
Unpack diploid genotypes that have been bit-packed into single bytes.

Parameters
----------
packed : ndarray, uint8, shape (n_variants, n_samples)
    Bit-packed diploid genotype array.

Returns
-------
g : GenotypeArray, shape (n_variants, n_samples, 2)
    Genotype array.

Examples
--------
>>> import allel
>>> import numpy as np
>>> packed = np.array([[0, 1],
...                    [2, 17],
...                    [34, 239]], dtype='u1')
>>> allel.GenotypeArray.from_packed(packed)
<GenotypeArray shape=(3, 2, 2) dtype=int8>
0/0 0/1
0/2 1/1
2/2 ./.
379,025
def radio_status_encode(self, rssi, remrssi, txbuf, noise, remnoise, rxerrors, fixed):
    return MAVLink_radio_status_message(rssi, remrssi, txbuf, noise,
                                        remnoise, rxerrors, fixed)
Status generated by radio and injected into MAVLink stream.

rssi     : Local signal strength (uint8_t)
remrssi  : Remote signal strength (uint8_t)
txbuf    : Remaining free buffer space in percent. (uint8_t)
noise    : Background noise level (uint8_t)
remnoise : Remote background noise level (uint8_t)
rxerrors : Receive errors (uint16_t)
fixed    : Count of error corrected packets (uint16_t)
379,026
def wait_until_visible(self, timeout=None):
    try:
        self.utils.wait_until_element_visible(self, timeout)
    except TimeoutException as exception:
        # NOTE: the message literals were lost in extraction; they are
        # reconstructed here from the surrounding format arguments.
        parent_msg = " and parent locator {}".format(self.parent) if self.parent else ""
        msg = "Page element of type %s with locator %s%s not found or is not visible after %s seconds"
        timeout = timeout if timeout else self.utils.get_explicitly_wait()
        self.logger.error(msg, type(self).__name__, self.locator, parent_msg, timeout)
        exception.msg += "\n {}".format(msg % (type(self).__name__, self.locator, parent_msg, timeout))
        raise exception
    return self
Search element and wait until it is visible :param timeout: max time to wait :returns: page element instance
379,027
def process_rdfgraph(self, rg, ont=None):
    if ont is None:
        ont = Ontology()
    subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme))
    if len(subjs) == 0:
        logging.warning("No ConceptScheme")
    else:
        ont.id = self._uri2id(subjs[0])
    subset_map = {}
    for concept in rg.subjects(RDF.type, SKOS.Concept):
        for s in self._get_schemes(rg, concept):
            subset_map[self._uri2id(s)] = s
    for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))):
        concept_uri = str(concept)
        id = self._uri2id(concept)
        logging.info("ADDING: {}".format(id))
        ont.add_node(id, self._get_label(rg, concept))
        for defn in rg.objects(concept, SKOS.definition):
            if (defn.language == self.lang):
                td = TextDefinition(id, escape_value(defn.value))
                ont.add_text_definition(td)
        for s in rg.objects(concept, SKOS.broader):
            ont.add_parent(id, self._uri2id(s))
        for s in rg.objects(concept, SKOS.related):
            ont.add_parent(id, self._uri2id(s), self._uri2id(SKOS.related))
        for m in rg.objects(concept, SKOS.exactMatch):
            ont.add_xref(id, self._uri2id(m))
        for m in rg.objects(concept, SKOS.altLabel):
            syn = Synonym(id, val=self._uri2id(m))
            ont.add_synonym(syn)
        for s in self._get_schemes(rg, concept):
            ont.add_to_subset(id, self._uri2id(s))
    return ont
Transform a SKOS terminology expressed in an RDF graph into an Ontology object.

Arguments
---------
rg: rdflib.Graph
    graph object

Returns
-------
Ontology
379,028
def plot_macadam(
    ellipse_scaling=10,
    plot_filter_positions=False,
    plot_standard_deviations=False,
    plot_rgb_triangle=True,
    plot_mesh=True,
    n=1,
    xy_to_2d=lambda xy: xy,
    axes_labels=("x", "y"),
):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(dir_path, "data/macadam1942/table3.yaml")) as f:
        data = yaml.safe_load(f)
    centers = []
    offsets = []
    for datak in data:
        # collect ellipse axis data from the table
        _, _, _, _, delta_y_delta_x, delta_s = numpy.array(datak["data"]).T
        offset = (
            numpy.array([numpy.ones(delta_y_delta_x.shape[0]), delta_y_delta_x])
            / numpy.sqrt(1 + delta_y_delta_x ** 2)
            * delta_s
        )
        if offset.shape[1] < 2:
            continue
        centers.append([datak["x"], datak["y"]])
        offsets.append(numpy.column_stack([+offset, -offset]))
    centers = numpy.array(centers)
    _plot_ellipse_data(
        centers,
        offsets,
        ellipse_scaling=ellipse_scaling,
        xy_to_2d=xy_to_2d,
        plot_mesh=plot_mesh,
        n=n,
        plot_rgb_triangle=plot_rgb_triangle,
    )
    return
See <https://en.wikipedia.org/wiki/MacAdam_ellipse>, <https://doi.org/10.1364%2FJOSA.32.000247>.
379,029
def _handle_tag_definetext2(self):
    obj = _make_object("DefineText2")
    self._generic_definetext_parser(obj, self._get_struct_rgba)
    return obj
Handle the DefineText2 tag.
379,030
def exit_config_mode(self, exit_config="return", pattern=r">"):
    return super(HuaweiBase, self).exit_config_mode(
        exit_config=exit_config, pattern=pattern
    )
Exit configuration mode.
379,031
def classmethod(self, encoding):
    encoding = ensure_bytes(encoding)
    typecodes = parse_type_encoding(encoding)
    # NOTE: the byte literals were lost in extraction; the implicit
    # receiver/selector typecodes (b'@:') and the '_' -> ':' selector-name
    # substitution are assumed from Objective-C bridging conventions.
    typecodes.insert(1, b'@:')
    encoding = b''.join(typecodes)

    def decorator(f):
        def objc_class_method(objc_cls, objc_cmd, *args):
            py_cls = ObjCClass(objc_cls)
            py_cls.objc_cmd = objc_cmd
            args = convert_method_arguments(encoding, args)
            result = f(py_cls, *args)
            if isinstance(result, ObjCClass):
                result = result.ptr.value
            elif isinstance(result, ObjCInstance):
                result = result.ptr.value
            return result
        name = f.__name__.replace('_', ':')
        self.add_class_method(objc_class_method, name, encoding)
        return objc_class_method
    return decorator
Function decorator for class methods.
379,032
def column_signs_(self):
    if self._always_positive():
        return np.ones(self.n_features)
    self.unhasher.recalculate_attributes()
    return self.unhasher.column_signs_
Return a numpy array with expected signs of features. Values are * +1 when all known terms which map to the column have positive sign; * -1 when all known terms which map to the column have negative sign; * ``nan`` when there are both positive and negative known terms for this column, or when there is no known term which maps to this column.
379,033
def get_distance_metres(aLocation1, aLocation2):
    dlat = aLocation2.lat - aLocation1.lat
    dlong = aLocation2.lon - aLocation1.lon
    return math.sqrt((dlat * dlat) + (dlong * dlong)) * 1.113195e5
Returns the ground distance in metres between two LocationGlobal objects. This method is an approximation, and will not be accurate over large distances and close to the earth's poles. It comes from the ArduPilot test code: https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
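A worked example with stand-in coordinates; the namedtuple is a placeholder for dronekit's LocationGlobal, and the function body is copied verbatim:

import math
from collections import namedtuple

LocationGlobal = namedtuple('LocationGlobal', 'lat lon')

def get_distance_metres(aLocation1, aLocation2):
    dlat = aLocation2.lat - aLocation1.lat
    dlong = aLocation2.lon - aLocation1.lon
    return math.sqrt((dlat * dlat) + (dlong * dlong)) * 1.113195e5

home = LocationGlobal(-35.363261, 149.165230)
target = LocationGlobal(-35.363000, 149.165000)
print(get_distance_metres(home, target))  # roughly 39 m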
379,034
def download(self):
    mp3_directory = self._pre_download()
    self.data.swifter.apply(func=lambda arg: self._download(*arg, mp3_directory),
                            axis=1, raw=True)
    return mp3_directory
Downloads the data associated with this instance Return: mp3_directory (os.path): The directory into which the associated mp3's were downloaded
379,035
def download_task(url, headers, destination, download_type='layer'):
    # NOTE: the default for download_type was lost in extraction; 'layer'
    # is assumed from the surrounding messages.
    bot.verbose("Downloading %s from %s" % (download_type, url))
    file_name = "%s.%s" % (destination, next(tempfile._get_candidate_names()))
    tar_download = download(url, file_name, headers=headers)
    try:
        shutil.move(tar_download, destination)
    except Exception:
        msg = "Cannot untar layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.error(msg)
        sys.exit(1)
    return destination
download an image layer (.tar.gz) to a specified destination. This task uses local versions of the same core stream/download functions that are used by the parent client. Parameters ========== url: the url of the layer to download headers: headers to pass along with the request destination: the file path to download to download_type: a label for the artifact being downloaded, used in logging
379,036
def function(self, x, y, sigma0, Rs, center_x=0, center_y=0):
    x_ = x - center_x
    y_ = y - center_y
    r = np.sqrt(x_**2 + y_**2)
    if isinstance(r, int) or isinstance(r, float):
        r = max(self._s, r)
    else:
        r[r < self._s] = self._s
    X = r / Rs
    f_ = sigma0 * Rs ** 2 * (np.log(X ** 2 / 4.) + 2 * self._F(X))
    return f_
lensing potential :param x: :param y: :param sigma0: sigma0/sigma_crit :param Rs: :param center_x: :param center_y: :return:
379,037
def add(self, *args, **kwargs):
    for cookie in args:
        self.all_cookies.append(cookie)
        if cookie.name in self:
            continue
        self[cookie.name] = cookie
    for key, value in kwargs.items():
        cookie = self.cookie_class(key, value)
        self.all_cookies.append(cookie)
        if key in self:
            continue
        self[key] = cookie
Add Cookie objects by their names, or create new ones under specified names. Any unnamed arguments are interpreted as existing cookies, and are added under the value in their .name attribute. With keyword arguments, the key is interpreted as the cookie name and the value as the UNENCODED value stored in the cookie.
379,038
def get_ranks(self):
    if self._use_non_text_features:
        return self._term_doc_matrix.get_metadata_freq_df()
    else:
        return self._term_doc_matrix.get_term_freq_df()
Returns ------- pd.DataFrame
379,039
def _fix_example_namespace(self):
    # NOTE: the prefix literal was lost in extraction; 'example' is assumed
    # from the docstring.
    example_prefix = 'example'
    idgen_prefix = idgen.get_id_namespace_prefix()
    if example_prefix not in self._input_namespaces:
        return
    self._input_namespaces[example_prefix] = idgen.EXAMPLE_NAMESPACE.name
Attempts to resolve issues where our samples use 'http://example.com/' for our example namespace but python-stix uses 'http://example.com' by removing the former.
379,040
def process_warn_strings(arguments):

    def _capitalize(s):
        if s[:5] == "scons":
            return "SCons" + s[5:]
        else:
            return s.capitalize()

    for arg in arguments:
        # separators and keywords recovered from the docstring: split on
        # hyphens, strip a leading 'no-', and 'all' means the base class
        elems = arg.lower().split('-')
        enable = 1
        if elems[0] == 'no':
            enable = 0
            del elems[0]
        if len(elems) == 1 and elems[0] == 'all':
            class_name = "Warning"
        else:
            class_name = ''.join(map(_capitalize, elems)) + "Warning"
        try:
            clazz = globals()[class_name]
        except KeyError:
            sys.stderr.write("No warning type: '%s'\n" % arg)
        else:
            if enable:
                enableWarningClass(clazz)
            elif issubclass(clazz, MandatoryDeprecatedWarning):
                fmt = "Can not disable mandatory warning: '%s'\n"
                sys.stderr.write(fmt % arg)
            else:
                suppressWarningClass(clazz)
Process string specifications of enabling/disabling warnings, as passed to the --warn option or the SetOption('warn') function. An argument to this option should be of the form <warning-class> or no-<warning-class>. The warning class is munged in order to get an actual class name from the classes above, which we need to pass to the {enable,disable}WarningClass() functions. The supplied <warning-class> is split on hyphens, each element is capitalized, then smushed back together. Then the string "Warning" is appended to get the class name. For example, 'deprecated' will enable the DeprecatedWarning class. 'no-dependency' will disable the DependencyWarning class. As a special case, --warn=all and --warn=no-all will enable or disable (respectively) the base Warning class of all warnings.
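A standalone demo of the name munging the docstring describes, using the hypothetical argument 'no-python-version':

def _capitalize(s):
    return "SCons" + s[5:] if s[:5] == "scons" else s.capitalize()

elems = 'no-python-version'.lower().split('-')  # ['no', 'python', 'version']
enable = elems[0] != 'no'
if not enable:
    del elems[0]
class_name = ''.join(map(_capitalize, elems)) + "Warning"
print(enable, class_name)  # False PythonVersionWarning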
379,041
def add(self, component: Union[Component, Sequence[Component]]) -> None:
    try:
        self[Span(*self._available_cell())] = component
    except NoUnusedCellsError:
        # no free cell left: append to the last occupied span instead
        span = list(self._spans.keys())[-1]
        self._spans[span] += component
Add a widget to the grid in the next available cell.

Searches over columns then rows for available cells.

Parameters
----------
component : bowtie._Component
    A Bowtie widget instance.
379,042
def flatatt(self, **attr):
    # NOTE: the attribute-name and separator literals were lost in
    # extraction; 'class', 'style' and 'data-*' are reconstructed from the
    # HTML semantics of this method.
    cs = ''
    attr = self._attr
    classes = self._classes
    data = self._data
    css = self._css
    attr = attr.copy() if attr else {}
    if classes:
        cs = ' '.join(classes)
        attr['class'] = cs
    if css:
        attr['style'] = ' '.join(('%s:%s;' % (k, v) for k, v in css.items()))
    if data:
        for k, v in data.items():
            attr['data-%s' % k] = dump_data_value(v)
    if attr:
        return ' '.join(attr_iter(attr))
    else:
        return ''
Return a string with attributes to add to the tag
379,043
def _process_thread(self, client):
    # NOTE: the status-message literals and the flow name were lost in
    # extraction; the strings below are plausible reconstructions, and
    # 'ArtifactCollectorFlow' is the standard GRR flow for this args proto.
    system_type = client.data.os_info.system
    print('System type: {0:s}'.format(system_type))
    artifact_list = []
    if self.artifacts:
        print('Artifacts to be collected: {0!s}'.format(self.artifacts))
        artifact_list = self.artifacts
    else:
        default_artifacts = self.artifact_registry.get(system_type, None)
        if default_artifacts:
            print('Collecting default artifacts for {0:s}: {1:s}'.format(
                system_type, ', '.join(default_artifacts)))
            artifact_list.extend(default_artifacts)
    if self.extra_artifacts:
        print('Throwing in extra artifacts: {0!s}'.format(self.extra_artifacts))
        artifact_list.extend(self.extra_artifacts)
    artifact_list = list(set(artifact_list))
    if not artifact_list:
        return
    flow_args = flows_pb2.ArtifactCollectorFlowArgs(
        artifact_list=artifact_list,
        use_tsk=self.use_tsk,
        ignore_interpolation_errors=True,
        apply_parsers=False)
    flow_id = self._launch_flow(client, 'ArtifactCollectorFlow', flow_args)
    self._await_flow(client, flow_id)
    collected_flow_data = self._download_files(client, flow_id)
    if collected_flow_data:
        print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
        fqdn = client.data.os_info.fqdn.lower()
        self.state.output.append((fqdn, collected_flow_data))
Process a single GRR client. Args: client: a GRR client object.
379,044
def serialize(self, queryset, **options):
    self.options = options
    self.stream = options.get("stream", StringIO())
    self.primary_key = options.get("primary_key", None)
    self.properties = options.get("properties")
    self.geometry_field = options.get("geometry_field", "geom")
    self.use_natural_keys = options.get("use_natural_keys", False)
    self.bbox = options.get("bbox", None)
    self.bbox_auto = options.get("bbox_auto", None)
    self.srid = options.get("srid", GEOJSON_DEFAULT_SRID)
    self.crs = options.get("crs", True)
    self.start_serialization()
    if ValuesQuerySet is not None and isinstance(queryset, ValuesQuerySet):
        self.serialize_values_queryset(queryset)
    elif isinstance(queryset, list):
        self.serialize_object_list(queryset)
    elif isinstance(queryset, QuerySet):
        self.serialize_queryset(queryset)
    self.end_serialization()
    return self.getvalue()
Serialize a queryset.
379,045
def in_file(self, filename: str) -> Iterator[FunctionDesc]:
    yield from self.__filename_to_functions.get(filename, [])
Returns an iterator over all of the functions definitions that are contained within a given file.
379,046
def _write_response(self, response):
    # NOTE: several literals (status-line format, dict keys, header names)
    # were lost in extraction; they are reconstructed here as plausible
    # placeholders for an HTTP/1.1 response writer.
    status = '{} {} {}\r\n'.format(response['version'], response['code'],
                                   responses[response['code']])
    self.logger.debug("Responding status: %s", status.strip())
    self._write_transport(status)
    if 'body' in response and 'Content-Length' not in response['headers']:
        response['headers']['Content-Length'] = len(response['body'])
        response['headers']['Date'] = datetime.utcnow().strftime(
            "%a, %d %b %Y %H:%M:%S +0000")
    for (header, content) in response['headers'].items():
        self.logger.debug("Sending header: %s: %s", header, content)
        self._write_transport('{}: {}\r\n'.format(header, content))
    self._write_transport('\r\n')
    if 'body' in response:
        self._write_transport(response['body'])
Write the response back to the client Arguments: response -- the dictionary containing the response.
379,047
def set_python(self, value):
    if not self.multiselect:
        if value and not isinstance(value, list):
            value = [value]
    value = value or []
    records = SortedDict()
    for record in value:
        self.validate_value(record)
        records[record.id] = record
    return_value = self._set(records)
    # NOTE: the raw-document key was lost in extraction; 'values' is assumed.
    self.record._raw['values'][self.id] = self.get_swimlane()
    return return_value
Expect list of record instances, convert to a SortedDict for internal representation
379,048
def qteRemoveMode(self, mode: str):
    for idx, item in enumerate(self._qteModeList):
        if item[0] == mode:
            self._qteModeList.remove(item)
            item[2].hide()
            item[2].deleteLater()
            self._qteUpdateLabelWidths()
            return True
    return False
Remove ``mode`` and associated label. If ``mode`` does not exist then nothing happens and the method returns **False**, otherwise **True**. |Args| * ``mode`` (**str**): the mode to remove. |Returns| * **bool**: **True** if the item was removed and **False** if there was an error (most likely ``mode`` does not exist). |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
379,049
def send_emission(self):
    if self._emit_queue.empty():
        return
    emit = self._emit_queue.get()
    emit()
emit and remove the first emission in the queue
379,050
def check_tx_with_confirmations(self, tx_hash: str, confirmations: int) -> bool:
    tx_receipt = self.w3.eth.getTransactionReceipt(tx_hash)
    # 'blockNumber' is the standard web3 receipt key; it is None while pending
    if not tx_receipt or tx_receipt['blockNumber'] is None:
        return False
    else:
        return (self.w3.eth.blockNumber - tx_receipt['blockNumber']) >= confirmations
Check tx hash and make sure it has the confirmations required :param w3: Web3 instance :param tx_hash: Hash of the tx :param confirmations: Minimum number of confirmations required :return: True if tx was mined with the number of confirmations required, False otherwise
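A hedged usage sketch using the older camelCase web3.py API that the snippet above targets; the endpoint URL and transaction hash are placeholders:

from web3 import Web3

w3 = Web3(Web3.HTTPProvider('http://localhost:8545'))

def confirmed(tx_hash: str, confirmations: int = 6) -> bool:
    receipt = w3.eth.getTransactionReceipt(tx_hash)
    if not receipt or receipt['blockNumber'] is None:
        return False  # not yet mined
    return (w3.eth.blockNumber - receipt['blockNumber']) >= confirmations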
379,051
def pad_length(s):
    # NOTE: the original twelve non-ASCII padding characters were lost in
    # extraction; accented stand-ins are used here as placeholders.
    padding_chars = [
        u'\u00e0', u'\u00e9', u'\u00ee', u'\u00f5', u'\u00fc', u'\u00e7',
        u'\u00e8', u'\u00ea', u'\u00f1', u'\u00f6', u'\u00e5', u'\u00fb',
    ]
    padding_generator = itertools.cycle(padding_chars)
    target_lengths = {
        six.moves.range(1, 11): 3,
        six.moves.range(11, 21): 2,
        six.moves.range(21, 31): 1.8,
        six.moves.range(31, 51): 1.6,
        six.moves.range(51, 71): 1.4,
    }
    if len(s) > 70:
        target_length = int(math.ceil(len(s) * 1.3))
    else:
        for r, v in target_lengths.items():
            if len(s) in r:
                target_length = int(math.ceil(len(s) * v))
    diff = target_length - len(s)
    pad = u"".join([next(padding_generator) for _ in range(diff)])
    return s + pad
Appends characters to the end of the string to increase the string length per IBM Globalization Design Guideline A3: UI Expansion. https://www-01.ibm.com/software/globalization/guidelines/a3.html :param s: String to pad. :returns: Padded string.
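A worked example of the expansion buckets above: a 4-character string falls in the 1-10 bucket (factor 3), so 8 padding characters are appended:

import math

s = 'Save'
target_length = int(math.ceil(len(s) * 3))  # ceil(4 * 3) = 12
print(target_length - len(s))               # 8 padding characters appended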
379,052
def entries(self):
    timer = Timer()
    passwords = []
    logger.info("Scanning %s ..", format_path(self.directory))
    listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0")
    for filename in split(listing, "\0"):
        basename, extension = os.path.splitext(filename)
        if extension == ".gpg":
            passwords.append(PasswordEntry(name=os.path.normpath(basename), store=self))
    logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer)
    return natsort(passwords, key=lambda e: e.name)
A list of :class:`PasswordEntry` objects.
379,053
def get_fun(fun):
    with _get_serv(ret=None, commit=True) as cur:
        # NOTE: the SQL statement was lost in extraction; this placeholder
        # reflects the three columns unpacked below (minion id, jid, full_ret).
        sql = '''SELECT id, jid, full_ret FROM salt_returns WHERE fun = ?'''
        cur.execute(sql, (fun,))
        data = cur.fetchall()
        ret = {}
        if data:
            for minion, _, full_ret in data:
                ret[minion] = salt.utils.json.loads(full_ret)
        return ret
Return a dict of the last function called for all minions
379,054
def set(self, key, value, *, section=DataStoreDocumentSection.Data):
    # '.' separator recovered from the docstring's dot-notation description
    key_notation = '.'.join([section, key])
    try:
        self._delete_gridfs_data(
            self._data_from_dotnotation(key_notation, default=None))
    except KeyError:
        # original log text lost in extraction; a generic message is used
        logger.info('Could not delete GridFS data for key {}'.format(key_notation))
    result = self._collection.update_one(
        {"_id": ObjectId(self._workflow_id)},
        {
            "$set": {
                key_notation: self._encode_value(value)
            },
            "$currentDate": {"lastModified": True}
        }
    )
    return result.modified_count == 1
Store a value under the specified key in the given section of the document. This method stores a value into the specified section of the workflow data store document. Any existing value is overridden. Before storing a value, any linked GridFS document under the specified key is deleted. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. value: The value that should be stored/updated. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the value could be set/updated, otherwise ``False``.
379,055
def has_column(self, table, column):
    column = column.lower()
    return column in list(map(lambda x: x.lower(),
                              self.get_column_listing(table)))
Determine if the given table has a given column. :param table: The table :type table: str :type column: str :rtype: bool
379,056
def encode_ulid(value: hints.Buffer) -> str:
    length = len(value)
    if length != 16:
        # original message text lost in extraction; a generic one is used
        raise ValueError('Expects 16 bytes for encoding; got {}'.format(length))
    encoding = ENCODING
    return \
        encoding[(value[0] & 224) >> 5] + \
        encoding[value[0] & 31] + \
        encoding[(value[1] & 248) >> 3] + \
        encoding[((value[1] & 7) << 2) | ((value[2] & 192) >> 6)] + \
        encoding[((value[2] & 62) >> 1)] + \
        encoding[((value[2] & 1) << 4) | ((value[3] & 240) >> 4)] + \
        encoding[((value[3] & 15) << 1) | ((value[4] & 128) >> 7)] + \
        encoding[(value[4] & 124) >> 2] + \
        encoding[((value[4] & 3) << 3) | ((value[5] & 224) >> 5)] + \
        encoding[value[5] & 31] + \
        encoding[(value[6] & 248) >> 3] + \
        encoding[((value[6] & 7) << 2) | ((value[7] & 192) >> 6)] + \
        encoding[(value[7] & 62) >> 1] + \
        encoding[((value[7] & 1) << 4) | ((value[8] & 240) >> 4)] + \
        encoding[((value[8] & 15) << 1) | ((value[9] & 128) >> 7)] + \
        encoding[(value[9] & 124) >> 2] + \
        encoding[((value[9] & 3) << 3) | ((value[10] & 224) >> 5)] + \
        encoding[value[10] & 31] + \
        encoding[(value[11] & 248) >> 3] + \
        encoding[((value[11] & 7) << 2) | ((value[12] & 192) >> 6)] + \
        encoding[(value[12] & 62) >> 1] + \
        encoding[((value[12] & 1) << 4) | ((value[13] & 240) >> 4)] + \
        encoding[((value[13] & 15) << 1) | ((value[14] & 128) >> 7)] + \
        encoding[(value[14] & 124) >> 2] + \
        encoding[((value[14] & 3) << 3) | ((value[15] & 224) >> 5)] + \
        encoding[value[15] & 31]
Encode the given buffer to a :class:`~str` using Base32 encoding. .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID bytes specifically and is not meant for arbitrary encoding. :param value: Bytes to encode :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview` :return: Value encoded as a Base32 string :rtype: :class:`~str` :raises ValueError: when the value is not 16 bytes
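A quick round-number check (assuming `encode_ulid` and `ENCODING` are importable from this module): 16 input bytes always yield a 26-character Base32 string, since 128 bits at 5 bits per symbol rounds up to 26.

import os
ulid_str = encode_ulid(os.urandom(16))  # any 16-byte buffer works
assert len(ulid_str) == 26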
379,057
def addRnaQuantificationSet(self): self._openRepo() dataset = self._repo.getDatasetByName(self._args.datasetName) if self._args.name is None: name = getNameFromPath(self._args.filePath) else: name = self._args.name rnaQuantificationSet = rna_quantification.SqliteRnaQuantificationSet( dataset, name) referenceSetName = self._args.referenceSetName if referenceSetName is None: raise exceptions.RepoManagerException( "A reference set name must be provided") referenceSet = self._repo.getReferenceSetByName(referenceSetName) rnaQuantificationSet.setReferenceSet(referenceSet) rnaQuantificationSet.populateFromFile(self._args.filePath) rnaQuantificationSet.setAttributes(json.loads(self._args.attributes)) self._updateRepo( self._repo.insertRnaQuantificationSet, rnaQuantificationSet)
Adds an rnaQuantificationSet into this repo
379,058
def EXTRA_LOGGING(self):
    # separators reconstructed from the docstring example
    # "oscar.paypal:DEBUG, django.db:INFO"
    input_text = get('EXTRA_LOGGING', '')
    if input_text:
        modules = input_text.split(',')
        modules = [x.split(':') for x in modules]
    else:
        modules = []
    return modules
List of modules with the different levels to log and their debug level.
For example:
[Logs]
EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
379,059
def update(self, allow_partial=True, force=False, **kwargs):
    if kwargs:
        self.__init__(partial=allow_partial, force=force, **kwargs)
        return not self._partial
    if not force and CACHE.get(hash(self)):
        cached = CACHE[hash(self)]
        for field in self._SIMPLE_FIELDS | self._COMPLEX_FIELDS:
            v = getattr(cached, field)
            setattr(self, field, v)
        self._partial = False
        logging.info(f'Updated {self!r} from cache')  # message reconstructed
        return True
    resp_dict = element_lookup_by_id(self.type, self.id)
    self.__init__(partial=False, **resp_dict)
    return True
Updates record and returns True if record is complete after update, else False.
379,060
def order(self, *args):
    if not args:
        return self
    orders = []
    o = self.orders
    if o:
        orders.append(o)
    for arg in args:
        if isinstance(arg, model.Property):
            orders.append(datastore_query.PropertyOrder(arg._name, _ASC))
        elif isinstance(arg, datastore_query.Order):
            orders.append(arg)
        else:
            # error message reconstructed; the original string was stripped
            raise TypeError('order() expects a Property or datastore_query.Order; '
                            'received %r' % arg)
    if not orders:
        orders = None
    elif len(orders) == 1:
        orders = orders[0]
    else:
        orders = datastore_query.CompositeOrder(orders)
    return self.__class__(kind=self.kind, ancestor=self.ancestor,
                          filters=self.filters, orders=orders,
                          app=self.app, namespace=self.namespace,
                          default_options=self.default_options,
                          projection=self.projection, group_by=self.group_by)
Return a new Query with additional sort order(s) applied.
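Usage sketch with hypothetical model/property names (`Employee.name` stands in for any ndb-style Property; `my_order` for a prebuilt datastore_query.Order):

q = Query(kind='Employee')
q2 = q.order(Employee.name)            # ascending sort on a Property
q3 = q.order(Employee.name, my_order)  # Properties and Order objects can be mixed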
379,061
def signature(self, value): h = HMAC(self.key, self.digest, backend=settings.CRYPTOGRAPHY_BACKEND) h.update(force_bytes(value)) return h
:type value: any :rtype: HMAC
379,062
def find_packages_parents_requirements_dists(pkg_names, working_set=None): dists = [] targets = set(pkg_names) for dist in find_packages_requirements_dists(pkg_names, working_set): if dist.project_name in targets: continue dists.append(dist) return dists
Leverages `find_packages_requirements_dists` but strips out the
distributions that match pkg_names.
379,063
def split_token(output): output = ensure_tuple(output) flags, i, len_output, data_allowed = set(), 0, len(output), True while i < len_output and isflag(output[i]): if output[i].must_be_first and i: raise ValueError("{} flag must be first.".format(output[i])) if i and output[i - 1].must_be_last: raise ValueError("{} flag must be last.".format(output[i - 1])) if output[i] in flags: raise ValueError("Duplicate flag {}.".format(output[i])) flags.add(output[i]) data_allowed &= output[i].allows_data i += 1 output = output[i:] if not data_allowed and len(output): raise ValueError("Output data provided after a flag that does not allow data.") return flags, output
Split an output into token tuple, real output tuple. :param output: :return: tuple, tuple
379,064
def failure_count(self): return len([i for i, result in enumerate(self.data) if result.failure])
Amount of failed test cases in this list. :return: integer
379,065
def save_neighbour_info(self, cache_dir, mask=None, **kwargs):
    if cache_dir:
        mask_name = getattr(mask, 'name', None)
        filename = self._create_cache_filename(
            cache_dir, mask=mask_name, **kwargs)
        # log message reconstructed; the original string was stripped
        LOG.info('Saving neighbour info to %s', filename)
        cache = self._read_resampler_attrs()
        self._apply_cached_indexes(cache, persist=True)
        self._index_caches[mask_name] = cache
        np.savez(filename, **cache)
Cache resampler's index arrays if there is a cache dir.
379,066
def list_user_access(self, user):
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    try:
        resp, resp_body = self.api.method_get(uri)
    except exc.NotFound as e:
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
    dbs = resp_body.get("databases", {})
    return [CloudDatabaseDatabase(self, db) for db in dbs]
Returns a list of all database names for which the specified user has access rights.
379,067
def _align_header(header, alignment, width, visible_width, is_multiline=False, width_fn=None): "Pad string header to width chars given known visible_width of the header." if is_multiline: header_lines = re.split(_multiline_codes, header) padded_lines = [_align_header(h, alignment, width, width_fn(h)) for h in header_lines] return "\n".join(padded_lines) ninvisible = len(header) - visible_width width += ninvisible if alignment == "left": return _padright(width, header) elif alignment == "center": return _padboth(width, header) elif not alignment: return "{0}".format(header) else: return _padleft(width, header)
Pad string header to width chars given known visible_width of the header.
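Why visible_width is passed separately: ANSI colour escapes occupy bytes but no screen columns, so the pad width must be corrected by the invisible-character count. A small sketch:

header = '\x1b[31mName\x1b[0m'                 # renders as "Name" (4 visible chars)
padded = _align_header(header, 'left', 10, 4)
# padded occupies 10 screen columns even though len(padded) is larger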
379,068
def save(self, path="speech"): if self._data is None: raise Exception("There's nothing to save") extension = "." + self.__params["format"] if os.path.splitext(path)[1] != extension: path += extension with open(path, "wb") as f: for d in self._data: f.write(d) return path
Save data in file. Args: path (optional): A path to save file. Defaults to "speech". File extension is optional. Absolute path is allowed. Returns: The path to the saved file.
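Usage sketch; `speech` is a hypothetical instance of the surrounding synthesis class:

path = speech.save()            # -> 'speech.<format>', extension added automatically
path = speech.save('/tmp/out')  # -> '/tmp/out.<format>'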
379,069
def _collect_masters_map(self, response):
    while True:
        try:
            data, addr = self._socket.recvfrom(0x400)
            if data:
                if addr not in response:
                    response[addr] = []
                response[addr].append(data)
            else:
                break
        except Exception as err:
            if not response:
                # log message reconstructed; the original string was stripped
                self.log.error('Discovery master collection failure: %s', err)
            break
Collect masters map from the network. :return:
379,070
def makeParameterTable(filename, params):
    macro_panel = "\multicolumn{3}{c}{\\textbf{Macroeconomic Parameters} } \n"
    macro_panel += "\\\\ $\\kapShare$ & " + "{:.2f}".format(params.CapShare) + " & Capital's share of income"
    # ... (the remaining table rows and the construction of `output`,
    #      `slides1_output` and `slides2_output` were lost during extraction;
    #      the file-writing skeleton below is reconstructed from the surviving
    #      fragments and the docstring) ...
    with open('./tables/' + filename + '.tex', 'w') as f:
        f.write(output)
    with open('./tables/' + filename + '_1.tex', 'w') as f:
        f.write(slides1_output)
    with open('./tables/' + filename + '_2.tex', 'w') as f:
        f.write(slides2_output)
Makes the parameter table for the paper, saving it to a tex file in the tables folder. Also makes two partial parameter tables for the slides. Parameters ---------- filename : str Name of the file in which to save output (in the tables directory). Suffix .tex is automatically added. params : Object containing the parameter values. Returns ------- None
379,071
def do_bash_complete(cli, prog_name):
    # COMP_WORDS / COMP_CWORD are the standard bash completion variables
    comp_words = os.environ['COMP_WORDS']
    try:
        cwords = shlex.split(comp_words)
        quoted = False
    except ValueError:
        cwords = split_args(comp_words)
        quoted = True
    cword = int(os.environ['COMP_CWORD'])
    args = cwords[1:cword]
    try:
        incomplete = cwords[cword]
    except IndexError:
        incomplete = ''
    choices = get_choices(cli, prog_name, args, incomplete)
    if quoted:
        echo('\n'.join(opt for opt, _ in choices), nl=False)
    else:
        # escape shell-special characters in unquoted completions
        # (the original regex was stripped; this is a plausible reconstruction)
        echo('\n'.join(re.sub(r'([\s\\"\'])', r'\\\1', opt)
                       for opt, _ in choices), nl=False)
    return True
Do the completion for bash Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line Returns ------- bool True if the completion was successful, False otherwise
379,072
def set(self, stype, sid, fields):
    if stype not in ['votelist', 'vnlist', 'wishlist']:
        raise SyntaxError("{} is not a valid type for set. Should be one of: votelist, vnlist or wishlist.".format(stype))
    command = "{} {} {}".format(stype, sid, ujson.dumps(fields))
    data = self.connection.send_command('set', command)
    # an error response carries 'id' and 'msg' fields (reconstruction of the
    # stripped dictionary keys, following the VNDB API error format)
    if 'id' in data:
        raise ServerError(data['msg'], data['id'])
    else:
        return True
Send a request to the API to modify something in the database if logged in. :param str stype: What are we modifying? One of: votelist, vnlist, wishlist :param int sid: The ID that we're modifying. :param dict fields: A dictionary of the fields and their values :raises ServerError: Raises a ServerError if an error is returned :return bool: True if successful, error otherwise
379,073
def to_prettytable(df): pt = PrettyTable() pt.field_names = df.columns for tp in zip(*(l for col, l in df.iteritems())): pt.add_row(tp) return pt
Convert DataFrame into ``PrettyTable``.
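Usage sketch. Note the implementation calls `df.iteritems()`, so it assumes an older pandas API (renamed to `items()` in pandas 2.x):

import pandas as pd
df = pd.DataFrame({'name': ['a', 'b'], 'count': [1, 2]})
print(to_prettytable(df))  # ASCII table with 'name' and 'count' columns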
379,074
def swap(tokens, maxdist=2): assert maxdist >= 2 tokens = list(tokens) if maxdist > len(tokens): maxdist = len(tokens) l = len(tokens) for i in range(0,l - 1): for permutation in permutations(tokens[i:i+maxdist]): if permutation != tuple(tokens[i:i+maxdist]): newtokens = tokens[:i] newtokens += permutation newtokens += tokens[i+maxdist:] yield newtokens if maxdist == len(tokens): break
Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations.
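A concrete run: with the default maxdist=2 only adjacent pairs are exchanged, so a three-token input yields exactly two variants.

list(swap(['a', 'b', 'c']))
# -> [['b', 'a', 'c'], ['a', 'c', 'b']]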
379,075
def implied_local_space(*, arg_index=None, keys=None):
    from qnet.algebra.core.hilbert_space_algebra import (
        HilbertSpace, LocalSpace)

    def args_to_local_space(cls, args, kwargs):
        if isinstance(args[arg_index], LocalSpace):
            new_args = args
        else:
            if isinstance(args[arg_index], (int, str)):
                try:
                    hs = cls._default_hs_cls(args[arg_index])
                except AttributeError:
                    hs = LocalSpace(args[arg_index])
            else:
                hs = args[arg_index]
                assert isinstance(hs, HilbertSpace)
            new_args = (tuple(args[:arg_index]) + (hs,) +
                        tuple(args[arg_index + 1:]))
        return new_args, kwargs

    def kwargs_to_local_space(cls, args, kwargs):
        if all([isinstance(kwargs[key], LocalSpace) for key in keys]):
            new_kwargs = kwargs
        else:
            new_kwargs = {}
            for key, val in kwargs.items():
                if key in keys:
                    if isinstance(val, (int, str)):
                        try:
                            val = cls._default_hs_cls(val)
                        except AttributeError:
                            val = LocalSpace(val)
                    assert isinstance(val, HilbertSpace)
                new_kwargs[key] = val
        return args, new_kwargs

    def to_local_space(cls, args, kwargs):
        # fixed: the inner helpers take (cls, args, kwargs), not
        # (args, kwargs, arg_index/keys) as the original calls suggested
        new_args, __ = args_to_local_space(cls, args, kwargs)
        __, new_kwargs = kwargs_to_local_space(cls, args, kwargs)
        return new_args, new_kwargs

    if (arg_index is not None) and (keys is None):
        return args_to_local_space
    elif (arg_index is None) and (keys is not None):
        return kwargs_to_local_space
    elif (arg_index is not None) and (keys is not None):
        return to_local_space
    else:
        raise ValueError("must give at least one of arg_index and keys")
Return a simplification that converts the positional argument `arg_index` from (str, int) to a subclass of :class:`.LocalSpace`, as well as any keyword argument with one of the given keys. The exact type of the resulting Hilbert space is determined by the `default_hs_cls` argument of :func:`init_algebra`. In many cases, we have :func:`implied_local_space` (in ``create``) in addition to a conversion in ``__init__``, so that :func:`match_replace` etc can rely on the relevant arguments being a :class:`HilbertSpace` instance.
379,076
def cancel(self, consumer_tag): if not self.channel.connection: return self.channel.basic_cancel(consumer_tag)
Cancel a channel by consumer tag.
379,077
def update_dependency(self, tile, depinfo, destdir=None):
    if destdir is None:
        # path components reconstructed; the original literals were stripped
        destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])
    has_version = False
    had_version = False
    if os.path.exists(destdir):
        has_version = True
        had_version = True
    for priority, rule in self.rules:
        if not self._check_rule(rule, depinfo):
            continue
        resolver = self._find_resolver(rule)
        # ... (the code that asks `resolver` to fetch or update the dependency
        #      and that builds `settings` was lost during extraction) ...
        if has_version:
            deptile = IOTile(destdir)
        self._save_depsettings(destdir, settings)
        if had_version:
            return "updated"
        return "installed"
    if has_version:
        return "already installed"
    return "not found"
Attempt to install or update a dependency to the latest version. Args: tile (IOTile): An IOTile object describing the tile that has the dependency depinfo (dict): a dictionary from tile.dependencies specifying the dependency destdir (string): An optional folder into which to unpack the dependency Returns: string: a string indicating the outcome. Possible values are: "already installed" "installed" "updated" "not found"
379,078
def set_parameter(name, parameter, value, path=None):
    if not exists(name, path=path):
        return None

    # stripped literals reconstructed following Salt's lxc module conventions
    cmd = 'lxc-cgroup'
    if path:
        cmd += ' -P {0}'.format(pipes.quote(path))
    cmd += ' -n {0} {1} {2}'.format(name, parameter, value)
    ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    if ret['retcode'] != 0:
        return False
    else:
        return True
Set the value of a cgroup parameter for a container. path path to the container parent directory default: /var/lib/lxc (system) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' lxc.set_parameter name parameter value
379,079
def get_group_category(self, category):
    category_id = obj_or_id(category, "category", (GroupCategory,))
    response = self.__requester.request(
        'GET',
        'group_categories/{}'.format(category_id)
    )
    return GroupCategory(self.__requester, response.json())
Get a single group category. :calls: `GET /api/v1/group_categories/:group_category_id \ <https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_ :param category: The object or ID of the category. :type category: :class:`canvasapi.group.GroupCategory` or int :rtype: :class:`canvasapi.group.GroupCategory`
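Usage sketch against canvasapi (URL and key are placeholders):

from canvasapi import Canvas
canvas = Canvas('https://canvas.example.edu', 'API_KEY')
category = canvas.get_group_category(42)  # accepts an int ID or a GroupCategory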
379,080
def sharedInterfaces():
    def get(self):
        if not self.sharedInterfaceNames:
            return ()
        if self.sharedInterfaceNames == ALL_IMPLEMENTED_DB:
            return tuple(implementedBy(self.sharedItem.__class__))
        else:
            # separator reconstructed; the names are stored comma-joined
            return tuple(map(namedAny, self.sharedInterfaceNames.split(u',')))

    def set(self, newValue):
        # writes back to the same attribute the getter reads (the original
        # assigned to `sharedAttributeNames`, apparently a typo)
        self.sharedInterfaceNames = _interfacesToNames(newValue)

    return get, set
This attribute is the public interface for code which wishes to discover the list of interfaces allowed by this Share. It is a list of Interface objects.
379,081
def lower(self): if self._reaction in self._view._flipped: return -super(FlipableFluxBounds, self).upper return super(FlipableFluxBounds, self).lower
Lower bound
379,082
def match_tracks(self, set_a, set_b, closest_matches=False):
    costs = self.track_cost_matrix(set_a, set_b) * 100
    min_row_costs = costs.min(axis=1)
    min_col_costs = costs.min(axis=0)
    good_rows = np.where(min_row_costs < 100)[0]
    good_cols = np.where(min_col_costs < 100)[0]
    assignments = []
    if len(good_rows) > 0 and len(good_cols) > 0:
        if closest_matches:
            # indexing mode reconstructed; 'ij' matches the row/column layout
            b_matches = costs[np.meshgrid(good_rows, good_cols,
                                          indexing='ij')].argmin(axis=1)
            a_matches = np.arange(b_matches.size)
            initial_assignments = [(good_rows[a_matches[x]], good_cols[b_matches[x]])
                                   for x in range(b_matches.size)]
        else:
            munk = Munkres()
            initial_assignments = munk.compute(
                costs[np.meshgrid(good_rows, good_cols, indexing='ij')].tolist())
            initial_assignments = [(good_rows[x[0]], good_cols[x[1]])
                                   for x in initial_assignments]
        for a in initial_assignments:
            if costs[a[0], a[1]] < 100:
                assignments.append(a)
    return assignments
Find the optimal set of matching assignments between set a and set b. This function supports optimal 1:1 matching using the Munkres method and matching from every object in set a to the closest object in set b. In this situation set b accepts multiple matches from set a. Args: set_a: set_b: closest_matches: Returns:
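The 1:1 branch delegates to the Munkres (Hungarian) algorithm, which picks the assignment with minimal total cost; a tiny standalone sketch of that core step:

from munkres import Munkres
costs = [[10, 90],
         [80, 5]]
print(Munkres().compute(costs))  # -> [(0, 0), (1, 1)], total cost 15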
379,083
def build_polygons(self, polygons):
    if not polygons:
        return
    if not isinstance(polygons, (list, tuple)):
        # error messages reconstructed; the original strings were stripped
        raise AttributeError('polygons accepts only lists and tuples')
    for points in polygons:
        if isinstance(points, dict):
            self.add_polygon(**points)
        elif isinstance(points, (tuple, list)):
            path = []
            for coords in points:
                if len(coords) != 2:
                    raise AttributeError('a point needs two coordinates')
                path.append({'lat': coords[0], 'lng': coords[1]})
            polygon_dict = self.build_polygon_dict(path)
            self.add_polygon(**polygon_dict)
Process data to construct polygons This method is built from the assumption that the polygons parameter is a list of: list of lists or tuples : a list of path points, each one indicating the point coordinates -- [lat,lng], [lat, lng], (lat, lng), ... tuple of lists or tuples : a tuple of path points, each one indicating the point coordinates -- (lat,lng), [lat, lng], (lat, lng), ... dicts: a dictionary with polylines attributes So, for instance, we have this general scenario as a input parameter: polygon = { 'stroke_color': '#0AB0DE', 'stroke_opacity': 1.0, 'stroke_weight': 3, 'fill_color': '#FFABCD', 'fill_opacity': 0.5, 'path': [{'lat': 33.678, 'lng': -116.243}, {'lat': 33.679, 'lng': -116.244}, {'lat': 33.680, 'lng': -116.250}, {'lat': 33.681, 'lng': -116.239}, {'lat': 33.678, 'lng': -116.243}] } path1 = [(33.665, -116.235), (33.666, -116.256), (33.667, -116.250), (33.668, -116.229)] path2 = ((33.659, -116.243), (33.660, -116.244), (33.649, -116.250), (33.644, -116.239)) path3 = ([33.688, -116.243], [33.680, -116.244], [33.682, -116.250], [33.690, -116.239]) path4 = [[33.690, -116.243], [33.691, -116.244], [33.692, -116.250], [33.693, -116.239]] polygons = [polygon, path1, path2, path3, path4]
379,084
def untrigger(queue, trigger=_c.FSQ_TRIGGER): trigger_path = fsq_path.trigger(queue, trigger=trigger) _queue_ok(os.path.dirname(trigger_path)) try: os.unlink(trigger_path) except (OSError, IOError, ), e: if e.errno != errno.ENOENT: raise FSQConfigError(e.errno, wrap_io_os_err(e))
Uninstalls the trigger for the specified queue -- if a queue has no trigger, this function is a no-op.
379,085
def assert_200(response, max_len=500): if response.status_code == 200: return raise ValueError( "Response was {}, not 200:\n{}\n{}".format( response.status_code, json.dumps(dict(response.headers), indent=2), response.content[:max_len]))
Check that a HTTP response returned 200.
379,086
def read_pure_water_absorption_from_file(self, file_name):
    # log messages reconstructed; the original strings were stripped
    lg.info('Reading pure water absorption from file: ' + file_name)
    try:
        self.a_water = self._read_iop_from_file(file_name)
    except:
        lg.exception('Problem reading pure water absorption file: ' + file_name)
Read the pure water absorption from a csv formatted file :param file_name: filename and path of the csv file
379,087
def encoded_to_array(encoded):
    if not isinstance(encoded, dict):
        if is_sequence(encoded):
            as_array = np.asanyarray(encoded)
            return as_array
        else:
            raise ValueError('unable to extract numpy array from input')
    encoded = decode_keys(encoded)
    dtype = np.dtype(encoded['dtype'])
    if 'base64' in encoded:
        array = np.frombuffer(base64.b64decode(encoded['base64']), dtype)
    elif 'binary' in encoded:
        array = np.frombuffer(encoded['binary'], dtype=dtype)
    if 'shape' in encoded:
        array = array.reshape(encoded['shape'])
    return array
Turn a dictionary with base64 encoded strings back into a numpy array. Parameters ------------ encoded : dict Has keys: dtype: string of dtype shape: int tuple of shape base64: base64 encoded string of flat array binary: decode result coming from numpy.tostring Returns ---------- array: numpy array
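A round trip under the documented key layout ('dtype', 'shape', 'base64'); this assumes `encoded_to_array` and its helpers are importable from the same module:

import base64
import numpy as np
arr = np.arange(4, dtype='float64')
encoded = {'dtype': 'float64', 'shape': (2, 2),
           'base64': base64.b64encode(arr.tobytes())}
restored = encoded_to_array(encoded)
assert restored.shape == (2, 2) and restored.dtype == np.float64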
379,088
def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    Y = data['Y'][:, 0:1]
    Y[Y.flatten() == -1] = 0

    kernel = GPy.kern.RBF(1)
    likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside())
    ep = GPy.inference.latent_function_inference.expectation_propagation.EP()
    # model name reconstructed; the original string was stripped
    m = GPy.core.GP(X=data['X'], Y=Y, kernel=kernel, likelihood=likelihood,
                    inference_method=ep, name='gp_classification_heaviside')

    if optimize:
        for _ in range(5):
            m.optimize(max_iters=int(max_iters / 5))
        print(m)
    if plot:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])
        m.plot(ax=axes[1])

    print(m)
    return m
Simple 1D classification example using a Heaviside GP transformation

:param seed: seed value for data generation (default is 4).
:type seed: int
379,089
def RABC(self):
    A = self.raw_data[-3]*2 - self.raw_data[-6]
    B = self.raw_data[-2]*2 - self.raw_data[-5]
    C = self.raw_data[-1]*2 - self.raw_data[-4]
    # format string reconstructed; the original literal was stripped
    return '%s %s %s' % (A, B, C)
Return the ABC turning point values.
379,090
def complete(self, uio, dropped=False):
    if self.dropped and not dropped:
        return
    # the two endpoint attribute names and the prompts below are
    # reconstructions; the original string literals were stripped
    for end in ['debits', 'credits']:
        if getattr(self, end):
            continue
        uio.show('Missing ' + end + ' for this transaction.')
        uio.show('')
        uio.show(self.summary())
        try:
            endpoints = []
            remaining = self.amount
            while remaining:
                account = uio.text('Account: ', None)
                amount = uio.decimal(
                    'Amount: ',
                    default=remaining,
                    lower=0,
                    upper=remaining
                )
                endpoints.append(Endpoint(account, amount))
                remaining = self.amount \
                    - sum(map(lambda x: x.amount, endpoints))
        except ui.RejectWarning:
            sys.exit("bye!")
        if end == 'debits':
            endpoints = map(
                lambda x: Endpoint(x.account, -x.amount),
                endpoints
            )
        setattr(self, end, endpoints)
Query for all missing information in the transaction
379,091
def get_finder(import_path):
    finder_class = import_string(import_path)
    if not issubclass(finder_class, BaseProcessesFinder):
        # error message reconstructed; the original string was stripped
        raise ImproperlyConfigured(
            'Finder {} is not a subclass of {}'.format(
                finder_class, BaseProcessesFinder))
    return finder_class()
Get a process finder.
379,092
def as_dict(self): d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "structure": self.structure.as_dict(), "frequencies": list(self.frequencies), "densities": list(self.densities), "pdos": []} if len(self.pdos) > 0: for at in self.structure: d["pdos"].append(list(self.pdos[at])) return d
Json-serializable dict representation of CompletePhononDos.
379,093
def to_fmt(self) -> str:
    infos = fmt.end(";\n", [])
    # separator and block delimiters reconstructed; the original literals
    # were stripped
    s = fmt.sep(', ', [])
    for ids in sorted(self.states.keys()):
        s.lsdata.append(str(ids))
    infos.lsdata.append(fmt.block('(', ')', [s]))
    infos.lsdata.append("events:" + repr(self.events))
    infos.lsdata.append(
        "named_events:" + repr(list(self.named_events.keys()))
    )
    infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
    return infos
Provide a useful representation of the register.
379,094
# defaults reconstructed where literals were stripped: estimate_intraday='infer'
# (pyfolio convention) and turnover_denom='AGB' (stated in the docstring)
def create_simple_tear_sheet(returns,
                             positions=None,
                             transactions=None,
                             benchmark_rets=None,
                             slippage=None,
                             estimate_intraday='infer',
                             live_start_date=None,
                             turnover_denom='AGB',
                             header_rows=None):
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    if (slippage is not None) and (transactions is not None):
        returns = txn.adjust_returns_for_slippage(returns, positions,
                                                  transactions, slippage)

    always_sections = 4
    positions_sections = 4 if positions is not None else 0
    transactions_sections = 2 if transactions is not None else 0
    live_sections = 1 if live_start_date is not None else 0
    benchmark_sections = 1 if benchmark_rets is not None else 0

    vertical_sections = sum([
        always_sections,
        positions_sections,
        transactions_sections,
        live_sections,
        benchmark_sections,
    ])

    if live_start_date is not None:
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)

    plotting.show_perf_stats(returns,
                             benchmark_rets,
                             positions=positions,
                             transactions=transactions,
                             turnover_denom=turnover_denom,
                             live_start_date=live_start_date,
                             header_rows=header_rows)

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)

    ax_rolling_returns = plt.subplot(gs[:2, :])
    i = 2
    if benchmark_rets is not None:
        ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
        i += 1
    ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1

    plotting.plot_rolling_returns(returns,
                                  factor_returns=benchmark_rets,
                                  live_start_date=live_start_date,
                                  cone_std=(1.0, 1.5, 2.0),
                                  ax=ax_rolling_returns)
    ax_rolling_returns.set_title('Cumulative returns')  # title reconstructed

    if benchmark_rets is not None:
        plotting.plot_rolling_beta(returns, benchmark_rets, ax=ax_rolling_beta)

    plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)

    plotting.plot_drawdown_underwater(returns, ax=ax_underwater)

    if positions is not None:
        ax_exposures = plt.subplot(gs[i, :])
        i += 1
        ax_top_positions = plt.subplot(gs[i, :], sharex=ax_exposures)
        i += 1
        ax_holdings = plt.subplot(gs[i, :], sharex=ax_exposures)
        i += 1
        ax_long_short_holdings = plt.subplot(gs[i, :])
        i += 1

        positions_alloc = pos.get_percent_alloc(positions)

        plotting.plot_exposures(returns, positions, ax=ax_exposures)

        plotting.show_and_plot_top_positions(returns,
                                             positions_alloc,
                                             show_and_plot=0,
                                             hide_positions=False,
                                             ax=ax_top_positions)

        plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)

        plotting.plot_long_short_holdings(returns, positions_alloc,
                                          ax=ax_long_short_holdings)

    if transactions is not None:
        ax_turnover = plt.subplot(gs[i, :])
        i += 1
        ax_txn_timings = plt.subplot(gs[i, :])
        i += 1

        plotting.plot_turnover(returns,
                               transactions,
                               positions,
                               ax=ax_turnover)

        plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
Simpler version of create_full_tear_sheet; generates summary performance
statistics and important plots as a single image.

- Plots: cumulative returns, rolling beta, rolling Sharpe, underwater,
    exposure, top 10 holdings, total holdings, long/short holdings,
    daily turnover, transaction time distribution.
- Never accept market_data input (market_data = None)
- Never accept sector_mappings input (sector_mappings = None)
- Never perform bootstrap analysis (bootstrap = False)
- Never hide positions on top 10 holdings plot (hide_positions = False)
- Always use default cone_std (cone_std = (1.0, 1.5, 2.0))

Parameters
----------
returns : pd.Series
    Daily returns of the strategy, noncumulative.
    - Time series with decimal returns.
    - Example:
        2015-07-16    -0.012143
        2015-07-17    0.045350
        2015-07-20    0.030957
        2015-07-21    0.004902
positions : pd.DataFrame, optional
    Daily net position values.
    - Time series of dollar amount invested in each position and cash.
    - Days where stocks are not held can be represented by 0 or NaN.
    - Non-working capital is labelled 'cash'
    - Example:
        index         'AAPL'         'MSFT'          cash
        2004-01-09    13939.3800     -14012.9930     711.5585
        2004-01-12    14492.6300     -14624.8700     27.1821
        2004-01-13    -13853.2800    13653.6400      -43.6375
transactions : pd.DataFrame, optional
    Executed trade volumes and fill prices.
    - One row per trade.
    - Trades on different names that occur at the same time will have
      identical indices.
    - Example:
        index                  amount   price    symbol
        2004-01-09 12:18:01    483      324.12   'AAPL'
        2004-01-09 12:18:01    122      83.10    'MSFT'
        2004-01-13 14:12:23    -75      340.43   'AAPL'
benchmark_rets : pd.Series, optional
    Daily returns of the benchmark, noncumulative.
slippage : int/float, optional
    Basis points of slippage to apply to returns before generating
    tearsheet stats and plots.
    If a value is provided, slippage parameter sweep plots will be
    generated from the unadjusted returns.
    Transactions and positions must also be passed.
    - See txn.adjust_returns_for_slippage for more details.
live_start_date : datetime, optional
    The point in time when the strategy began live trading, after
    its backtest period. This datetime should be normalized.
turnover_denom : str, optional
    Either AGB or portfolio_value, default AGB.
    - See full explanation in txn.get_turnover.
header_rows : dict or OrderedDict, optional
    Extra rows to display at the top of the perf stats table.
set_context : boolean, optional
    If True, set default plotting style context.
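A minimal invocation sketch with synthetic data (pyfolio-style returns are tz-aware daily decimals; positions and transactions stay optional):

import numpy as np
import pandas as pd
idx = pd.date_range('2020-01-01', periods=250, freq='B', tz='UTC')
rets = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
create_simple_tear_sheet(rets)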
379,095
def check_support_user_port(cls, hw_info_ex): return ((hw_info_ex.m_dwProductCode & PRODCODE_MASK_PID) != ProductCode.PRODCODE_PID_BASIC) \ and ((hw_info_ex.m_dwProductCode & PRODCODE_MASK_PID) != ProductCode.PRODCODE_PID_RESERVED1) \ and cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 2, 16)
Checks whether the module supports a user I/O port. :param HardwareInfoEx hw_info_ex: Extended hardware information structure (see method :meth:`get_hardware_info`). :return: True when the module supports a user I/O port, otherwise False. :rtype: bool
379,096
async def send_photo(self, path, entity):
    await self.send_file(
        entity, path,
        progress_callback=self.upload_progress_callback
    )
    print('Photo sent!')  # message reconstructed; the original was stripped
Sends the file located at path to the desired entity as a photo
379,097
def solve_limited(self, assumptions=[]): if self.maplesat: if self.use_timer: start_time = time.clock() def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) self.status = pysolvers.maplechrono_solve_lim(self.maplesat, assumptions) def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) if self.use_timer: self.call_time = time.clock() - start_time self.accu_time += self.call_time return self.status
Solve internal formula using given budgets for conflicts and propagations.
379,098
def remove_properties(self): if self.features_layer is not None: self.features_layer.remove_properties() if self.header is not None: self.header.remove_lp()
Removes the property layer (if it exists) of the object (in memory)
379,099
def quaternion_from_axis_rotation(angle, axis):
    out = np.zeros(4, dtype=float)
    if axis == 'x':
        out[1] = 1
    elif axis == 'y':
        out[2] = 1
    elif axis == 'z':
        out[3] = 1
    else:
        raise ValueError('Invalid axis input.')
    out *= math.sin(angle / 2.0)
    out[0] = math.cos(angle / 2.0)
    return Quaternion(out)
Return quaternion for rotation about given axis. Args: angle (float): Angle in radians. axis (str): Axis for rotation Returns: Quaternion: Quaternion for axis rotation. Raises: ValueError: Invalid input axis.
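A quick numeric check: a 90-degree rotation about z gives components (w, x, y, z) of roughly (0.7071, 0, 0, 0.7071).

import math
q = quaternion_from_axis_rotation(math.pi / 2, 'z')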