def send_publish(self, mid, topic, payload, qos, retain, dup):
    self.logger.debug("Send PUBLISH")
    if self.sock == NC.INVALID_SOCKET:
        return NC.ERR_NO_CONN
    return self._do_send_publish(mid, utf8encode(topic),
                                 utf8encode(payload), qos, retain, dup)
Send PUBLISH.
def encode(self, delimiter=';'):
    try:
        return delimiter.join([str(f) for f in [
            self.node_id,
            self.child_id,
            int(self.type),
            self.ack,
            int(self.sub_type),
            self.payload,
        ]]) + '\n'
    except ValueError:
        _LOGGER.error('Error encoding message to gateway')
Encode a command string from message.
def filter_records(root, head, update, filters=()):
    root, head, update = freeze(root), freeze(head), freeze(update)
    for filter_ in filters:
        root, head, update = filter_(root, head, update)
    return thaw(root), thaw(head), thaw(update)
Apply the filters to the records.
def run_spyder(app, options, args):
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        if main.console is not None:
            try:
                main.console.shell.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()
    if main.console:
        main.console.shell.interpreter.namespace['spy'] = \
            Spy(app=app, window=main)
    if args:
        for a in args:
            main.open_external_file(a)
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
    if running_in_mac_app():
        app.sig_open_external_file.connect(main.open_external_file)
    app.focusChanged.connect(main.change_last_focused_widget)
    if not running_under_pytest():
        app.exec_()
    return main
Create and show Spyder's main window. Start QApplication event loop.
def list_datasets(name=None):
    reg = registry.get_registry(Dataset)
    if name is not None:
        class_ = reg[name.lower()]
        return _REGSITRY_NAME_KWARGS[class_]
    else:
        return {dataset_name: _REGSITRY_NAME_KWARGS[class_]
                for dataset_name, class_ in registry.get_registry(Dataset).items()}
Get valid datasets and registered parameters.

Parameters
----------
name : str or None, default None
    Return names and registered parameters of registered datasets. If name
    is specified, only registered parameters of the respective dataset are
    returned.

Returns
-------
dict:
    A dict of all the valid keyword parameters names for the specified
    dataset. If name is set to None, returns a dict mapping each valid name
    to its respective keyword parameter dict. The valid names can be plugged
    in `gluonnlp.model.word_evaluation_model.create(name)`.
def sctiks(sc, clkstr):
    sc = ctypes.c_int(sc)
    clkstr = stypes.stringToCharP(clkstr)
    ticks = ctypes.c_double()
    libspice.sctiks_c(sc, clkstr, ctypes.byref(ticks))
    return ticks.value
Convert a spacecraft clock format string to number of "ticks".

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html

:param sc: NAIF spacecraft identification code.
:type sc: int
:param clkstr: Character representation of a spacecraft clock.
:type clkstr: str
:return: Number of ticks represented by the clock string.
:rtype: float
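A minimal usage sketch, assuming a spiceypy-style wrapper and that suitable kernels have already been furnished; the kernel paths and the clock string below are illustrative only:

    import spiceypy as spice

    spice.furnsh("naif0012.tls")   # leapseconds kernel (illustrative path)
    spice.furnsh("vg2_sclk.tsc")   # SCLK kernel for the spacecraft (illustrative path)
    ticks = spice.sctiks(-32, "2/20538:39:768")  # -32 = Voyager 2 NAIF id
    print(ticks)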
def reference(self):
    tfi = self.get_taskfileinfo_selection()
    if tfi:
        self.reftrack.reference(tfi)
Reference a file

:returns: None
:rtype: None
:raises: None
def get_lists(client):
    response = client.authenticated_request(client.api.Endpoints.LISTS)
    return response.json()
Gets all the client's lists
def as_dict(self):
    if not self._is_valid:
        self.validate()
    from .converters import to_dict
    return to_dict(self)
Returns the model as a dict
def init_tape(self, string):
    for char in string:
        if char not in self.alphabet and not char.isspace() and char != self.EMPTY_SYMBOL:
            raise RuntimeError('Invalid symbol: "' + char + '"')
    self.check()
    self.state = self.START_STATE
    self.head = 0
    self.tape = {}
    for i in range(len(string)):
        symbol = string[i] if not string[i].isspace() else self.EMPTY_SYMBOL
        self.tape[i] = symbol
Init system values.
def sha1(s):
    h = hashlib.new('sha1')
    h.update(s)
    return h.hexdigest()
Returns a sha1 of the given string
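Note that on Python 3, hashlib's update() requires bytes, so despite the docstring a str argument must be encoded first. A quick sketch (the hashlib import is assumed to exist at module level):

    import hashlib

    print(sha1(b'hello'))                 # 'aaf4c61d...'
    print(sha1('hello'.encode('utf-8')))  # same digest for the encoded str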
def parse_age(value=None):
    if not value:
        return None
    try:
        seconds = int(value)
    except ValueError:
        return None
    if seconds < 0:
        return None
    try:
        return timedelta(seconds=seconds)
    except OverflowError:
        return None
Parses a base-10 integer count of seconds into a timedelta.

If parsing fails, the return value is `None`.

:param value: a string consisting of an integer represented in base-10
:return: a :class:`datetime.timedelta` object or `None`.
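A short usage sketch (values are illustrative):

    print(parse_age('120'))   # datetime.timedelta(seconds=120)
    print(parse_age('abc'))   # None (not an integer)
    print(parse_age('-5'))    # None (negative values are rejected)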
def get_arguments():
    parser = argparse.ArgumentParser(
        description='Handles bumping of the artifact version')
    parser.add_argument('--log-config', '-l',
                        action='store',
                        dest='logger_config',
                        help='The location of the logging config json file',
                        default='')
    parser.add_argument('--log-level', '-L',
                        help='Provide the log level. Defaults to INFO.',
                        dest='log_level',
                        action='store',
                        default='INFO',
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
    parser.add_argument('--major',
                        help='Bump the major version',
                        dest='bump_major',
                        action='store_true',
                        default=False)
    parser.add_argument('--minor',
                        help='Bump the minor version',
                        dest='bump_minor',
                        action='store_true',
                        default=False)
    parser.add_argument('--patch',
                        help='Bump the patch version',
                        dest='bump_patch',
                        action='store_true',
                        default=False)
    parser.add_argument('--version',
                        help='Set the version',
                        dest='version',
                        action='store',
                        default=False)
    args = parser.parse_args()
    return args
This gets us the CLI arguments. Returns the args as parsed by the argparser.
def _check_timezone_max_length_attribute(self):
    possible_max_length = max(map(len, pytz.all_timezones))
    if self.max_length < possible_max_length:
        return [
            checks.Error(
                msg=(
                    "'max_length' is too short to support all possible "
                    "pytz time zones."
                ),
                hint=(
                    "pytz {version}'s longest time zone string has a "
                    "length of {value}, although it is recommended that "
                    "you leave room for longer time zone strings to be "
                    "added in the future.".format(
                        version=pytz.VERSION, value=possible_max_length
                    )
                ),
                obj=self,
            )
        ]
    return []
Checks that the `max_length` attribute covers all possible pytz timezone lengths.
def connect(self, funct):
    def get_directory():
        rec = QFileDialog.getExistingDirectory(self, 'Path to Recording'
                                               ' Directory')
        if rec == '':
            return
        self.setText(rec)
        funct()
    self.clicked.connect(get_directory)
Call funct when the text was changed.

Parameters
----------
funct : function
    function that broadcasts a change.

Notes
-----
There is something wrong here. When you run this function, it calls for
opening a directory three or four times. This is obviously wrong but I
don't understand why this happens three times. Traceback did not help.
def list_container_services(access_token, subscription_id, resource_group):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', resource_group,
                        '/providers/Microsoft.ContainerService/ContainerServices',
                        '?api-version=', ACS_API])
    return do_get(endpoint, access_token)
List the container services in a resource group.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.
    resource_group (str): Azure resource group name.

Returns:
    HTTP response. JSON model.
def set_variations(self, variations):
    if variations is None:
        variations = ffi.NULL
    else:
        variations = _encode_string(variations)
    cairo.cairo_font_options_set_variations(self._pointer, variations)
    self._check_status()
Sets the OpenType font variations for the font options object.

Font variations are specified as a string with a format that is similar to
the CSS font-variation-settings. The string contains a comma-separated
list of axis assignments, where each assignment consists of a 4-character
axis name and a value, separated by whitespace and an optional equals sign.

:param variations: the new font variations, or ``None``.

*New in cairo 1.16.*

*New in cairocffi 0.9.*
def pca(U, centre=False):
    if centre:
        C = np.mean(U, axis=1, keepdims=True)
        U = U - C
    else:
        C = None
    B, S, _ = np.linalg.svd(U, full_matrices=False, compute_uv=True)
    return B, S**2, C
Compute the PCA basis for columns of input array `U`.

Parameters
----------
U : array_like
    2D data array with rows corresponding to different variables and
    columns corresponding to different observations
centre : bool, optional (default False)
    Flag indicating whether to centre data

Returns
-------
B : ndarray
    A 2D array representing the PCA basis; each column is a PCA component.
    B.T is the analysis transform into the PCA representation, and B is
    the corresponding synthesis transform
S : ndarray
    The eigenvalues of the PCA components
C : ndarray or None
    None if centering is disabled, otherwise the mean of the data matrix
    subtracted in performing the centering
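A small usage sketch (random data is illustrative; note that rows are variables and columns are observations):

    import numpy as np

    U = np.random.randn(3, 100)    # 3 variables, 100 observations
    B, S, C = pca(U, centre=True)
    X = B.T.dot(U - C)             # analysis: project into PCA coordinates
    U_rec = B.dot(X) + C           # synthesis: reconstruct the data
    print(np.allclose(U, U_rec))   # True (full-rank basis)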
def add_bonus(worker_dict):
    " Adds DB-logged worker bonus to worker list data "
    try:
        unique_id = '{}:{}'.format(worker_dict['workerId'],
                                   worker_dict['assignmentId'])
        worker = Participant.query.filter(
            Participant.uniqueid == unique_id).one()
        worker_dict['bonus'] = worker.bonus
    except sa.exc.InvalidRequestError:
        worker_dict['bonus'] = 'N/A'
    return worker_dict
Adds DB-logged worker bonus to worker list data
def _pop(self, block=True, timeout=None, left=False):
    item = None
    timer = None
    deque = self._deque
    empty = IndexError('pop from an empty deque')
    if block is False:
        if len(self._deque) > 0:
            item = deque.popleft() if left else deque.pop()
        else:
            raise empty
    else:
        try:
            if timeout is not None:
                timer = gevent.Timeout(timeout, empty)
                timer.start()
            while True:
                self.notEmpty.wait()
                if len(deque) > 0:
                    item = deque.popleft() if left else deque.pop()
                    break
        finally:
            if timer is not None:
                timer.cancel()
    if len(deque) == 0:
        self.notEmpty.clear()
    return item
Removes and returns an item from this GeventDeque. This is an internal method, called by the public methods pop() and popleft().
def get_stream(self, stream):
    path = '/archive/{}/streams/{}'.format(self._instance, stream)
    response = self._client.get_proto(path=path)
    message = archive_pb2.StreamInfo()
    message.ParseFromString(response.content)
    return Stream(message)
Gets a single stream.

:param str stream: The name of the stream.
:rtype: .Stream
def strtobytes(input, encoding):
    py_version = sys.version_info[0]
    if py_version >= 3:
        return _strtobytes_py3(input, encoding)
    return _strtobytes_py2(input, encoding)
Take a str and transform it into a byte array.
def newDocFragment(self):
    ret = libxml2mod.xmlNewDocFragment(self._o)
    if ret is None:
        raise treeError('xmlNewDocFragment() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
Creation of a new Fragment node.
def find_outliers(group, delta):
    with_pos = sorted([pair for pair in enumerate(group)], key=lambda p: p[1])
    outliers_start = outliers_end = -1
    for i in range(0, len(with_pos) - 1):
        cur = with_pos[i][1]
        nex = with_pos[i + 1][1]
        if nex - cur > delta:
            if i < (len(with_pos) - i):
                outliers_start, outliers_end = 0, i + 1
            else:
                outliers_start, outliers_end = i + 1, len(with_pos)
            break
    if outliers_start != -1:
        return [with_pos[i][0] for i in range(outliers_start, outliers_end)]
    else:
        return []
given a list of values, find those that are apart from the rest by
`delta`. the indexes for the outliers are returned, if any.

examples:

    values = [100, 6, 7, 8, 9, 10, 150]
    find_outliers(values, 5) -> [0, 6]

    values = [5, 6, 5, 4, 5]
    find_outliers(values, 3) -> []
def stack1d(*points):
    result = np.empty((2, len(points)), order="F")
    for index, point in enumerate(points):
        result[:, index] = point
    return result
Fill out the columns of matrix with a series of points.

This is because ``np.hstack()`` will just make another 1D vector out of
them and ``np.vstack()`` will put them in the rows.

Args:
    points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e. arrays
        with shape ``(2,)``).

Returns:
    numpy.ndarray: The array with each point in ``points`` as its columns.
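A quick sketch of the behaviour (illustrative values):

    import numpy as np

    p0 = np.array([1.0, 2.0])
    p1 = np.array([3.0, 4.0])
    print(stack1d(p0, p1))
    # [[1. 3.]
    #  [2. 4.]]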
def append(self, other):
    if not isinstance(other, StarPopulation):
        raise TypeError('Only StarPopulation objects can be appended '
                        'to a StarPopulation.')
    if not np.all(self.stars.columns == other.stars.columns):
        raise ValueError('Two populations must have same columns to combine them.')
    if len(self.constraints) > 0:
        logging.warning('All constraints are cleared when appending '
                        'another population.')
    self.stars = pd.concat((self.stars, other.stars))
    if self.orbpop is not None and other.orbpop is not None:
        self.orbpop = self.orbpop + other.orbpop
Appends stars from another StarPopulation, in place.

:param other: Another :class:`StarPopulation`; must have same columns as ``self``.
def _add_property(self, name, default_value):
    name = str(name)
    self._properties[name] = default_value
Add a device property with a given default value.

Args:
    name (str): The name of the property to add
    default_value (int, bool): The value of the property
def dst(self, dt):
    tt = _localtime(_mktime((dt.year, dt.month, dt.day,
                             dt.hour, dt.minute, dt.second,
                             dt.weekday(), 0, -1)))
    if tt.tm_isdst > 0:
        return _dstdiff
    return _zero
datetime -> DST offset in minutes east of UTC.
def is_invalid_operation(self, callsign,
                         timestamp=datetime.utcnow().replace(tzinfo=UTC)):
    # note: the default timestamp is evaluated once, at import time
    callsign = callsign.strip().upper()
    if self._lookuptype == "clublogxml":
        return self._check_inv_operation_for_date(
            callsign, timestamp,
            self._invalid_operations, self._invalid_operations_index)
    elif self._lookuptype == "redis":
        data_dict, index = self._get_dicts_from_redis(
            "_inv_op_", "_inv_op_index_", self._redis_prefix, callsign)
        return self._check_inv_operation_for_date(callsign, timestamp,
                                                  data_dict, index)
    raise KeyError
Returns True if an operation is known as invalid

Args:
    callsign (string): Amateur Radio callsign
    timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

Returns:
    bool: True if a record exists for this callsign (at the given time)

Raises:
    KeyError: No matching callsign found
    APIKeyMissingError: API Key for Clublog missing or incorrect

Example:
    The following code checks the Clublog XML database if the operation is
    valid for two dates.

    >>> from pyhamtools import LookupLib
    >>> from datetime import datetime
    >>> import pytz
    >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
    >>> print my_lookuplib.is_invalid_operation("5W1CFN")
    True
    >>> try:
    >>>     timestamp = datetime(year=2012, month=1, day=31).replace(tzinfo=pytz.UTC)
    >>>     my_lookuplib.is_invalid_operation("5W1CFN", timestamp)
    >>> except KeyError:
    >>>     print "Seems to be an invalid operation before 31.1.2012"
    Seems to be an invalid operation before 31.1.2012

Note:
    This method is available for

    - clublogxml
    - redis
def get_custom_values(self, key):
    self._handled.add(key)
    return self._lookup[key]
Return a set of values for the given customParameter name.
def query(self, query, media=None, year=None, fields=None,
          extended=None, **kwargs):
    if not media:
        warnings.warn(
            "\"media\" parameter is now required on the "
            "Trakt['search'].query() method",
            DeprecationWarning, stacklevel=2
        )
    if fields and not media:
        raise ValueError('"fields" can only be used when the "media" '
                         'parameter is defined')
    query = {'query': query}
    if year:
        query['year'] = year
    if fields:
        query['fields'] = fields
    if extended:
        query['extended'] = extended
    if isinstance(media, list):
        media = ','.join(media)
    response = self.http.get(
        params=[media],
        query=query
    )
    items = self.get_data(response, **kwargs)
    if isinstance(items, requests.Response):
        return items
    if items is not None:
        return SearchMapper.process_many(self.client, items)
    return None
Search by titles, descriptions, translated titles, aliases, and people.

**Note:** Results are ordered by the most relevant score.

:param query: Search title or description
:type query: :class:`~python:str`

:param media: Desired media type (or :code:`None` to return all matching items)

    **Possible values:**

    - :code:`movie`
    - :code:`show`
    - :code:`episode`
    - :code:`person`
    - :code:`list`

:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`

:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`

:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`

:param extended: Level of information to include in response

    **Possible values:**

    - :code:`None`: Minimal (e.g. title, year, ids) **(default)**
    - :code:`full`: Complete

:type extended: :class:`~python:str`

:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`

:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
def make_gym_env(name,
                 rl_env_max_episode_steps=-1,
                 maxskip_env=False,
                 rendered_env=False,
                 rendered_env_resize_to=None,
                 sticky_actions=False):
    env = gym.make(name)
    return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env,
                           rendered_env, rendered_env_resize_to,
                           sticky_actions)
Create a gym env optionally with a time limit and maxskip wrapper.

NOTE: The returned env may already be wrapped with TimeLimit!

Args:
    name: `str` - base name of the gym env to make.
    rl_env_max_episode_steps: `int` or None - Using any value < 0 returns
        the env as-is, otherwise we impose the requested timelimit. Setting
        this to None returns a wrapped env that doesn't have a step limit.
    maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
    rendered_env: whether to force render for observations. Use this for
        environments that are not natively rendering the scene for
        observations.
    rendered_env_resize_to: a list of [height, width] to change the
        original resolution of the native environment render.
    sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.

Returns:
    An instance of `gym.Env` or `gym.Wrapper`.
def migrate(self, migrations_package_name, up_to=9999):
    from .migrations import MigrationHistory
    logger = logging.getLogger('migrations')
    applied_migrations = self._get_applied_migrations(migrations_package_name)
    modules = import_submodules(migrations_package_name)
    unapplied_migrations = set(modules.keys()) - applied_migrations
    for name in sorted(unapplied_migrations):
        logger.info('Applying migration %s...', name)
        for operation in modules[name].operations:
            operation.apply(self)
        self.insert([MigrationHistory(package_name=migrations_package_name,
                                      module_name=name,
                                      applied=datetime.date.today())])
        if int(name[:4]) >= up_to:
            break
Executes schema migrations.

- `migrations_package_name` - fully qualified name of the Python package
  containing the migrations.
- `up_to` - number of the last migration to apply.
def limitsSql(startIndex=0, maxResults=0):
    if startIndex and maxResults:
        return " LIMIT {}, {}".format(startIndex, maxResults)
    elif startIndex:
        raise Exception("startIndex was provided, but maxResults was not")
    elif maxResults:
        return " LIMIT {}".format(maxResults)
    else:
        return ""
Construct a SQL LIMIT clause
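A quick sketch of the possible outputs (values are illustrative):

    print(limitsSql())                              # ""
    print(limitsSql(maxResults=10))                 # " LIMIT 10"
    print(limitsSql(startIndex=20, maxResults=10))  # " LIMIT 20, 10"
    limitsSql(startIndex=20)                        # raises Exception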
def _get_network_vswitch_map_by_port_id(self, port_id):
    for network_id, vswitch in six.iteritems(self._network_vswitch_map):
        if port_id in vswitch['ports']:
            return (network_id, vswitch)
    return (None, None)
Get the vswitch name for the received port id.
def native(self, writeAccess=False, isolation_level=None):
    host = self.database().writeHost() if writeAccess else self.database().host()
    conn = self.open(writeAccess=writeAccess)
    try:
        if isolation_level is not None:
            if conn.isolation_level == isolation_level:
                isolation_level = None
            else:
                conn.set_isolation_level(isolation_level)
        yield conn
    except Exception:
        if self._closed(conn):
            conn = None
            self.close()
        else:
            conn = self._rollback(conn)
        raise
    else:
        if not self._closed(conn):
            self._commit(conn)
    finally:
        if conn is not None and not self._closed(conn):
            if isolation_level is not None:
                conn.set_isolation_level(isolation_level)
            self.__pool[host].put(conn)
Opens a new database connection to the database defined by the inputted
database.

:return <variant> native connection
def template(args):
    " Add or remove templates from site. "
    site = Site(args.PATH)
    if args.ACTION == "add":
        return site.add_template(args.TEMPLATE)
    return site.remove_template(args.TEMPLATE)
Add or remove templates from site.
def remove(self, point, node=None):
    if not self:
        return
    if self.should_remove(point, node):
        return self._remove(point)
    if self.left and self.left.should_remove(point, node):
        self.left = self.left._remove(point)
    elif self.right and self.right.should_remove(point, node):
        self.right = self.right._remove(point)
    if point[self.axis] <= self.data[self.axis]:
        if self.left:
            self.left = self.left.remove(point, node)
    if point[self.axis] >= self.data[self.axis]:
        if self.right:
            self.right = self.right.remove(point, node)
    return self
Removes the node with the given point from the tree

Returns the new root node of the (sub)tree.

If there are multiple points matching "point", only one is removed. The
optional "node" parameter is used for checking the identity, once the
removal candidate is decided.
def setnode(delta, graph, node, exists):
    delta.setdefault(graph, {}).setdefault('nodes', {})[node] = bool(exists)
Change a delta to say that a node was created or deleted
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None,
               layer_collection=None):
    if filters is None:
        filters = shape_list(x)[-1]
    with tf.variable_scope(
            name, default_name="layer_norm", values=[x], reuse=reuse):
        scale, bias = layer_norm_vars(filters)
        return layer_norm_compute(x, epsilon, scale, bias,
                                  layer_collection=layer_collection)
Layer normalize the tensor x, averaging over the last dimension.
def enable(
    self, cmd="enable", pattern=r"(ssword|User Name)", re_flags=re.IGNORECASE
):
    output = ""
    if not self.check_enable_mode():
        count = 4
        i = 1
        while i < count:
            self.write_channel(self.normalize_cmd(cmd))
            new_data = self.read_until_prompt_or_pattern(
                pattern=pattern, re_flags=re_flags
            )
            output += new_data
            if "User Name" in new_data:
                self.write_channel(self.normalize_cmd(self.username))
                new_data = self.read_until_prompt_or_pattern(
                    pattern=pattern, re_flags=re_flags
                )
                output += new_data
            if "ssword" in new_data:
                self.write_channel(self.normalize_cmd(self.secret))
                output += self.read_until_prompt()
                return output
            time.sleep(1)
            i += 1
    if not self.check_enable_mode():
        msg = (
            "Failed to enter enable mode. Please ensure you pass "
            "the 'secret' argument to ConnectHandler."
        )
        raise ValueError(msg)
Enter enable mode.

With RADIUS can prompt for User Name:

    SSH@Lab-ICX7250>en
    User Name:service_netmiko
    Password:
    SSH@Lab-ICX7250#
def delete(self):
    res = requests.delete(url=self.record_url, headers=HEADERS, verify=False)
    if res.status_code == 204:
        return {}
    return res.json()
Deletes the record.
def get_special_folder(self, name):
    name = name if isinstance(name, OneDriveWellKnowFolderNames) \
        else OneDriveWellKnowFolderNames(name.lower())
    name = name.value
    if self.object_id:
        url = self.build_url(
            self._endpoints.get('get_special').format(id=self.object_id,
                                                      name=name))
    else:
        url = self.build_url(
            self._endpoints.get('get_special_default').format(name=name))
    response = self.con.get(url)
    if not response:
        return None
    data = response.json()
    return self._classifier(data)(parent=self, **{self._cloud_data_key: data})
Returns the specified Special Folder

:return: a special Folder
:rtype: drive.Folder
def add_passwords(self, identifiers, passwords):
    if not isinstance(identifiers, list):
        raise TypeError("identifiers can only be an instance of type list")
    for a in identifiers[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    if not isinstance(passwords, list):
        raise TypeError("passwords can only be an instance of type list")
    for a in passwords[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    self._call("addPasswords", in_p=[identifiers, passwords])
Adds a list of passwords required to import or export encrypted virtual
machines.

in identifiers of type str
    List of identifiers.

in passwords of type str
    List of matching passwords.
def mutate(self, node, index):
    assert index < len(OFFSETS), 'received count with no associated offset'
    assert isinstance(node, parso.python.tree.Number)
    val = eval(node.value) + OFFSETS[index]
    return parso.python.tree.Number(' ' + str(val), node.start_pos)
Modify the numeric value on `node`.
def delete_user_role(self, user, role):
    self.project_service.set_auth(self._token_project)
    self.project_service.delete_user_role(user, role)
Remove role from given user.

Args:
    user (string): User name.
    role (string): Role to remove.

Raises:
    requests.HTTPError on failure.
def rt(nu, size=None):
    return rnormal(0, 1, size) / np.sqrt(rchi2(nu, size) / nu)
Student's t random variates.
def _get_packet(self, socket):
    data, (ip, port) = socket.recvfrom(self._buffer_size)
    packet, remainder = self._unpack(data)
    self.inbox.put((ip, port, packet))
    self.new_packet.set()
    self.debug(u"RX: {}".format(packet))
    if packet.header.sequence_number is not None:
        self._send_ack(ip, port, packet)
    ack_seq = packet.header.ack_sequence_number
    if ack_seq is not None:
        with self._seq_ack_lock:
            if ack_seq in self._seq_ack:
                self.debug(u"Seq {} got acked".format(ack_seq))
                self._seq_ack.remove(ack_seq)
    return packet
Read packet and put it into inbox

:param socket: Socket to read from
:type socket: socket.socket
:return: Read packet
:rtype: APPMessage
def find_by_name(self, term: str,
                 include_placeholders: bool = False) -> List[Account]:
    query = (
        self.query
        .filter(Account.name.like('%' + term + '%'))
        .order_by(Account.name)
    )
    if not include_placeholders:
        query = query.filter(Account.placeholder == 0)
    return query.all()
Search for account by part of the name
def _prerun(self):
    self.check_required_params()
    self._set_status("RUNNING")
    logger.debug(
        "{}.PreRun: {}[{}]: running...".format(
            self.__class__.__name__, self.__class__.path, self.uuid
        ),
        extra=dict(
            kmsg=Message(
                self.uuid, entrypoint=self.__class__.path, params=self.params
            ).dump()
        )
    )
    return self.prerun()
To execute before running the message.
def set_substitution(self, what, rep):
    if rep is None:
        # Unset the substitution.
        if what in self._subs:
            del self._subs[what]
    else:
        self._subs[what] = rep
Set a substitution.

Equivalent to ``! sub`` in RiveScript code.

:param str what: The original text to replace.
:param str rep: The text to replace it with.
    Set this to ``None`` to delete the substitution.
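A usage sketch, assuming a RiveScript-style bot object exposing this method (the substitution text is illustrative):

    from rivescript import RiveScript

    bot = RiveScript()
    bot.set_substitution("whats", "what is")  # "whats up" is read as "what is up"
    bot.set_substitution("whats", None)       # remove the substitution again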
def entrez_sets_of_results(url, retstart=False, retmax=False,
                           count=False) -> Optional[List[requests.Response]]:
    if not retstart:
        retstart = 0
    if not retmax:
        retmax = 500
    if not count:
        count = retmax
    retmax = 500
    while retstart < count:
        diff = count - retstart
        if diff < 500:
            retmax = diff
        _url = url + f'&retstart={retstart}&retmax={retmax}'
        resp = entrez_try_get_multiple_times(_url)
        if resp is None:
            return
        retstart += retmax
        yield resp
Gets sets of results back from Entrez.

Entrez can only return 500 results at a time. This creates a generator
that gets results by incrementing retstart and retmax.

Parameters
----------
url : str
    The Entrez API url to use.
retstart : int
    Return values starting at this index.
retmax : int
    Return at most this number of values.
count : int
    The number of results returned by EQuery.

Yields
------
requests.Response
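A consumption sketch (the query URL and the handler are illustrative; pagination is handled inside the generator):

    url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
           '?db=pubmed&query_key=1&WebEnv=...')  # illustrative base query
    for resp in entrez_sets_of_results(url, count=1200):
        handle_page(resp.content)  # hypothetical handler per 500-result page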
def _set_lim_and_transforms(self):
    LambertAxes._set_lim_and_transforms(self)
    yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0)
    yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0)
    yaxis_space = Affine2D().scale(1.0, 1.1)

    self._yaxis_transform = \
        yaxis_stretch + \
        self.transData

    yaxis_text_base = \
        yaxis_stretch + \
        self.transProjection + \
        (yaxis_space +
         self.transAffine +
         self.transAxes)

    self._yaxis_text1_transform = \
        yaxis_text_base + \
        Affine2D().translate(-8.0, 0.0)

    self._yaxis_text2_transform = \
        yaxis_text_base + \
        Affine2D().translate(8.0, 0.0)
Setup the key transforms for the axes.
def children_as_pi(self, squash=False):
    probs = self.child_N
    if squash:
        probs = probs ** .98
    sum_probs = np.sum(probs)
    if sum_probs == 0:
        return probs
    return probs / np.sum(probs)
Returns the child visit counts as a probability distribution, pi.

If squash is true, exponentiate the probabilities by a temperature
slightly larger than unity to encourage diversity in early play and
hopefully to move away from 3-3s.
def emit(self, record):
    try:
        QgsMessageLog.logMessage(record.getMessage(), 'InaSAFE', 0)
    except MemoryError:
        message = tr(
            'Due to memory limitations on this machine, InaSAFE can not '
            'handle the full log')
        print(message)
        QgsMessageLog.logMessage(message, 'InaSAFE', 0)
Try to log the message to QGIS if available, otherwise do nothing.

:param record: logging record containing whatever info needs to be logged.
def _pdb_frame(self):
    if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
        return self._pdb_obj.curframe
Return current Pdb frame if there is any
def say_tmp_filepath(text=None, preference_program="festival"):
    filepath = shijian.tmp_filepath() + ".wav"
    say(text=text, preference_program=preference_program, filepath=filepath)
    return filepath
Say specified text to a temporary file and return the filepath.
def gdal_rasterize(src, dst, options):
    out = gdal.Rasterize(dst, src, options=gdal.RasterizeOptions(**options))
    out = None  # release the dataset reference so it is flushed to disk
a simple wrapper for gdal.Rasterize

Parameters
----------
src: str or :osgeo:class:`ogr.DataSource`
    the input data set
dst: str
    the output data set
options: dict
    additional parameters passed to gdal.Rasterize; see
    :osgeo:func:`gdal.RasterizeOptions`

Returns
-------
None
def layout_asides(self, block, context, frag, view_name, aside_frag_fns):
    result = Fragment(frag.content)
    result.add_fragment_resources(frag)
    for aside, aside_fn in aside_frag_fns:
        aside_frag = self.wrap_aside(block, aside, view_name,
                                     aside_fn(block, context), context)
        aside.save()
        result.add_content(aside_frag.content)
        result.add_fragment_resources(aside_frag)
    return result
Execute and layout the aside_frags wrt the block's frag.

Runtimes should feel free to override this method to control execution,
place, and style the asides appropriately for their application.

This default method appends the aside_frags after frag. If you override
this, you must call wrap_aside around each aside as per this function.

Args:
    block (XBlock): the block being rendered
    frag (html): The result from rendering the block
    aside_frag_fns list((aside, aside_fn)): The asides and closures for
        rendering to call
def __get_factory_with_context(self, factory_name):
    factory = self.__factories.get(factory_name)
    if factory is None:
        raise TypeError("Unknown factory '{0}'".format(factory_name))

    factory_context = getattr(
        factory, constants.IPOPO_FACTORY_CONTEXT, None
    )
    if factory_context is None:
        raise TypeError(
            "Factory context missing in '{0}'".format(factory_name)
        )
    return factory, factory_context
Retrieves the factory registered with the given name and its factory
context

:param factory_name: The name of the factory
:return: A (factory, context) tuple
:raise TypeError: Unknown factory, or factory not manipulated
def _load_settings(self):
    if self._autosettings_path is None:
        return
    gui_settings_dir = _os.path.join(_cwd, 'egg_settings')
    path = _os.path.join(gui_settings_dir, self._autosettings_path)
    if not _os.path.exists(path):
        return
    settings = _g.QtCore.QSettings(path, _g.QtCore.QSettings.IniFormat)
    if settings.contains('State') and hasattr_safe(self._window, "restoreState"):
        x = settings.value('State')
        if hasattr(x, "toByteArray"):
            x = x.toByteArray()
        self._window.restoreState(x)
    if settings.contains('Geometry'):
        x = settings.value('Geometry')
        if hasattr(x, "toByteArray"):
            x = x.toByteArray()
        self._window.restoreGeometry(x)
Loads all the parameters from a databox text file. If path=None, loads from self._autosettings_path.
def show_ipsecpolicy(self, ipsecpolicy, **_params):
    return self.get(self.ipsecpolicy_path % (ipsecpolicy), params=_params)
Fetches information of a specific IPsecPolicy.
def reset(self, force):
    client = self.create_client()
    bucket = client.lookup_bucket(self.bucket_name)
    if bucket is not None:
        if not force:
            self._log.error("Bucket already exists, aborting.")
            raise ExistingBackendError
        self._log.info("Bucket already exists, deleting all content.")
        for blob in bucket.list_blobs():
            self._log.info("Deleting %s ..." % blob.name)
            bucket.delete_blob(blob.name)
    else:
        client.create_bucket(self.bucket_name)
Connect to the assigned bucket or create if needed. Clear all the blobs inside.
def change_ref(self, r0=None, lmax=None):
    if lmax is None:
        lmax = self.lmax
    clm = self.pad(lmax)
    if r0 is not None and r0 != self.r0:
        for l in _np.arange(lmax+1):
            clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**(l+2)
            if self.errors is not None:
                clm.errors[:, l, :l+1] *= (self.r0 / r0)**(l+2)
        clm.r0 = r0
    return clm
Return a new SHMagCoeffs class instance with a different reference r0.

Usage
-----
clm = x.change_ref([r0, lmax])

Returns
-------
clm : SHMagCoeffs class instance.

Parameters
----------
r0 : float, optional, default = self.r0
    The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
    Maximum spherical harmonic degree to output.

Description
-----------
This method returns a new class instance of the magnetic potential, but
using a different reference r0. When changing the reference radius r0,
the spherical harmonic coefficients will be upward or downward continued
under the assumption that the reference radius is exterior to the body.
def str_to_list(
    input_str,
    item_converter=lambda x: x,
    item_separator=',',
    list_to_collection_converter=None,
):
    if not isinstance(input_str, six.string_types):
        raise ValueError(input_str)
    input_str = str_quote_stripper(input_str)
    result = [
        item_converter(x.strip())
        for x in input_str.split(item_separator)
        if x.strip()
    ]
    if list_to_collection_converter is not None:
        return list_to_collection_converter(result)
    return result
a conversion function for list
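A usage sketch (illustrative values):

    print(str_to_list('a, b, c'))                    # ['a', 'b', 'c']
    print(str_to_list('1,2,3', item_converter=int))  # [1, 2, 3]
    print(str_to_list('1,2,2', item_converter=int,
                      list_to_collection_converter=set))  # {1, 2}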
def fit(self, y, **kwargs):
    if y.ndim > 1:
        raise YellowbrickValueError(
            "y needs to be an array or Series with one dimension")
    if self.target is None:
        self.target = 'Frequency'
    self.draw(y)
    return self
Sets up y for the histogram and checks to ensure that ``y`` is of the
correct data type. Fit calls draw.

Parameters
----------
y : an array of one dimension or a pandas Series

kwargs : dict
    keyword arguments passed to scikit-learn API.
def tospark(self, engine=None):
    from thunder.series.readers import fromarray
    if self.mode == 'spark':
        logging.getLogger('thunder').warn('images already in spark mode')
        pass
    if engine is None:
        raise ValueError('Must provide SparkContext')
    return fromarray(self.toarray(), index=self.index, labels=self.labels,
                     engine=engine)
Convert to spark mode.
def get_throttled_by_consumed_read_percent(
        table_name, lookback_window_start=15, lookback_period=5):
    try:
        metrics1 = __get_aws_metric(
            table_name,
            lookback_window_start,
            lookback_period,
            'ConsumedReadCapacityUnits')
        metrics2 = __get_aws_metric(
            table_name,
            lookback_window_start,
            lookback_period,
            'ReadThrottleEvents')
    except BotoServerError:
        raise

    if metrics1 and metrics2:
        lookback_seconds = lookback_period * 60
        throttled_by_consumed_read_percent = (
            (
                (float(metrics2[0]['Sum']) / float(lookback_seconds)) /
                (float(metrics1[0]['Sum']) / float(lookback_seconds))
            ) * 100)
    else:
        throttled_by_consumed_read_percent = 0

    logger.info('{0} - Throttled read percent by consumption: {1:.2f}%'.format(
        table_name, throttled_by_consumed_read_percent))
    return throttled_by_consumed_read_percent
Returns the number of throttled read events in percent of consumption

:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Percent of throttled read events by consumption
def _output_file_data(self, outfp, blocksize, ino):
    log_block_size = self.pvd.logical_block_size()
    outfp.seek(ino.extent_location() * log_block_size)
    tmp_start = outfp.tell()
    with inode.InodeOpenData(ino, log_block_size) as (data_fp, data_len):
        utils.copy_data(data_len, blocksize, data_fp, outfp)
        utils.zero_pad(outfp, data_len, log_block_size)
    if self._track_writes:
        end = outfp.tell()
        bisect.insort_left(self._write_check_list,
                           self._WriteRange(tmp_start, end - 1))
    if ino.boot_info_table is not None:
        old = outfp.tell()
        outfp.seek(tmp_start + 8)
        self._outfp_write_with_check(outfp, ino.boot_info_table.record(),
                                     enable_overwrite_check=False)
        outfp.seek(old)
    return outfp.tell() - tmp_start
Internal method to write a file's data out.

Parameters:
    outfp - The file object to write the data to.
    blocksize - The blocksize to use when writing the data out.
    ino - The Inode to write.

Returns:
    The total number of bytes written out.
def hline(self, x, y, width, color):
    self.rect(x, y, width, 1, color, fill=True)
Draw a horizontal line up to a given length.
def _load_config(self, path):
    p = os.path.abspath(os.path.expanduser(path))
    logger.debug('Loading configuration from: %s', p)
    return read_json_file(p)
Load configuration from JSON

:param path: path to the JSON config file
:type path: str
:return: config dictionary
:rtype: dict
def resample(
    self,
    rule: Union[str, int] = "1s",
    max_workers: int = 4,
) -> "Traffic":
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        cumul = []
        tasks = {
            executor.submit(flight.resample, rule): flight
            for flight in self
        }
        for future in tqdm(as_completed(tasks), total=len(tasks)):
            cumul.append(future.result())
    return self.__class__.from_flights(cumul)
Resamples all trajectories, flight by flight. `rule` defines the desired sample rate (default: 1s)
def check_empty_response(self, orig_request, method_config, start_response):
    response_config = method_config.get('response', {}).get('body')
    if response_config == 'empty':
        cors_handler = self._create_cors_handler(orig_request)
        return util.send_wsgi_no_content_response(start_response, cors_handler)
If the response from the backend is empty, return a HTTP 204 No Content.

Args:
    orig_request: An ApiRequest, the original request from the user.
    method_config: A dict, the API config of the method to be called.
    start_response: A function with semantics defined in PEP-333.

Returns:
    If the backend response was empty, this returns a string containing the
    response body that should be returned to the user. If the backend
    response wasn't empty, this returns None, indicating that we should not
    exit early with a 204.
def item_details(item_id, lang="en"):
    params = {"item_id": item_id, "lang": lang}
    cache_name = "item_details.%(item_id)s.%(lang)s.json" % params
    return get_cached("item_details.json", cache_name, params=params)
This resource returns details about a single item.

:param item_id: The item to query for.
:param lang: The language to display the texts in.

The response is an object with at least the following properties. Note
that the availability of some properties depends on the type of the item.

item_id (number):
    The item id.
name (string):
    The name of the item.
description (string):
    The item description.
type (string):
    The item type.
level (integer):
    The required level.
rarity (string):
    The rarity. One of ``Junk``, ``Basic``, ``Fine``, ``Masterwork``,
    ``Rare``, ``Exotic``, ``Ascended`` or ``Legendary``.
vendor_value (integer):
    The value in coins when selling to a vendor.
icon_file_id (string):
    The icon file id to be used with the render service.
icon_file_signature (string):
    The icon file signature to be used with the render service.
game_types (list):
    The game types where the item is usable. Currently known game types
    are: ``Activity``, ``Dungeon``, ``Pve``, ``Pvp``, ``PvpLobby`` and
    ``WvW``
flags (list):
    Additional item flags. Currently known item flags are:
    ``AccountBound``, ``HideSuffix``, ``NoMysticForge``, ``NoSalvage``,
    ``NoSell``, ``NotUpgradeable``, ``NoUnderwater``,
    ``SoulbindOnAcquire``, ``SoulBindOnUse`` and ``Unique``
restrictions (list):
    Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and
    ``Sylvari``.

Each item type has an `additional key`_ with information specific to that
item type.

.. _additional key: item-properties.html
def collapse_indents(indentation):
    change_in_level = ind_change(indentation)
    if change_in_level == 0:
        indents = ""
    elif change_in_level < 0:
        indents = closeindent * (-change_in_level)
    else:
        indents = openindent * change_in_level
    return indentation.replace(openindent, "").replace(closeindent, "") + indents
Removes all openindent-closeindent pairs.
def fuzzy(self, key, limit=5):
    instances = [i[2] for i in self.container if i[2]]
    if not instances:
        return
    instances = sum(instances, [])
    from fuzzywuzzy import process
    maybe = process.extract(key, instances, limit=limit)
    return maybe
Give suggestions from all instances.
def visit_Break(self, _):
    if self.break_handlers and self.break_handlers[-1]:
        return Statement("goto {0}".format(self.break_handlers[-1]))
    else:
        return Statement("break")
Generate break statement in most cases and goto for orelse clause.

See Also : cxx_loop
def _preprocess_add_items(self, items):
    paths = []
    entries = []
    for item in items:
        if isinstance(item, string_types):
            paths.append(self._to_relative_path(item))
        elif isinstance(item, (Blob, Submodule)):
            entries.append(BaseIndexEntry.from_blob(item))
        elif isinstance(item, BaseIndexEntry):
            entries.append(item)
        else:
            raise TypeError("Invalid Type: %r" % item)
    return (paths, entries)
Split the items into two lists of path strings and BaseEntries.
def FlushShortIdRecords(site_service):
    szService = c_char_p(site_service.encode('utf-8'))
    szMessage = create_string_buffer(b" ")
    nMessage = c_ushort(20)
    nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
                                              nMessage)
    return str(nRet) + szMessage.value.decode('utf-8')
Flush all the queued records.

:param site_service: The site.service where data was pushed
:return: message whether function was successful
def delete_minion_cachedir(minion_id, provider, opts, base=None):
    if isinstance(opts, dict):
        __opts__.update(opts)
    if __opts__.get('update_cachedir', False) is False:
        return
    if base is None:
        base = __opts__['cachedir']
    driver = next(six.iterkeys(__opts__['providers'][provider]))
    fname = '{0}.p'.format(minion_id)
    for cachedir in 'requested', 'active':
        path = os.path.join(base, cachedir, driver, provider, fname)
        log.debug('path: %s', path)
        if os.path.exists(path):
            os.remove(path)
Deletes a minion's entry from the cloud cachedir. It will search through all cachedirs to find the minion's cache file. Needs `update_cachedir` set to True.
def handle_invocation(self, message):
    req_id = message.request_id
    reg_id = message.registration_id
    if reg_id in self._registered_calls:
        handler = self._registered_calls[reg_id][REGISTERED_CALL_CALLBACK]
        invoke = WampInvokeWrapper(self, handler, message)
        invoke.start()
    else:
        error_uri = self.get_full_uri('error.unknown.uri')
        self.send_message(ERROR(
            request_code=WAMP_INVOCATION,
            request_id=req_id,
            details={},
            error=error_uri
        ))
Passes the invocation request to the appropriate callback.
def get_full_url(self, parsed_url):
    full_path = parsed_url.path
    if parsed_url.query:
        full_path = '%s?%s' % (full_path, parsed_url.query)
    return full_path
Returns url path with querystring
def printArchive(fileName):
    archive = CombineArchive()
    if archive.initializeFromArchive(fileName) is None:
        print("Invalid Combine Archive")
        return None

    print('*' * 80)
    print('Print archive:', fileName)
    print('*' * 80)
    printMetaDataFor(archive, ".")
    print("Num Entries: {0}".format(archive.getNumEntries()))

    for i in range(archive.getNumEntries()):
        entry = archive.getEntry(i)
        print(" {0}: location: {1} format: {2}".format(
            i, entry.getLocation(), entry.getFormat()))
        printMetaDataFor(archive, entry.getLocation())
        for j in range(entry.getNumCrossRefs()):
            print("  {0}: crossRef location {1}".format(
                j, entry.getCrossRef(j).getLocation()))

    archive.cleanUp()
Prints content of combine archive

:param fileName: path of archive
:return: None
def _get_module(target):
    filepath, sep, namespace = target.rpartition('|')
    if sep and not filepath:
        raise BadDirectory("Path to file not supplied.")

    module, sep, class_or_function = namespace.rpartition(':')

    if (sep and not module) or (filepath and not module):
        raise MissingModule("Need a module path for %s (%s)" %
                            (namespace, target))

    if filepath and filepath not in sys.path:
        if not os.path.isdir(filepath):
            raise BadDirectory("No such directory: '%s'" % filepath)
        sys.path.append(filepath)

    if not class_or_function:
        raise MissingMethodOrFunction(
            "No Method or Function specified in '%s'" % target)

    if module:
        try:
            __import__(module)
        except ImportError as e:
            raise ImportFailed("Failed to import '%s'. "
                               "Error: %s" % (module, e))

    klass, sep, function = class_or_function.rpartition('.')
    return module, klass, function
Import a named class, module, method or function.

Accepts these formats:
    ".../file/path|module_name:Class.method"
    ".../file/path|module_name:Class"
    ".../file/path|module_name:function"
    "module_name:Class"
    "module_name:function"
    "module_name:Class.function"

If a fully qualified directory is specified, it implies the directory is
not already on the Python Path, in which case it will be added.

For example, if I import /home/foo (and /home/foo is not in the python
path) as "/home/foo|mycode:MyClass.mymethod" then /home/foo will be added
to the python path and the module loaded as normal.
def numval(token):
    if token.type == 'INTEGER':
        return int(token.value)
    elif token.type == 'FLOAT':
        return float(token.value)
    else:
        return token.value
Return the numerical value of token.value if it is a number
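A quick sketch (using a hypothetical namedtuple standing in for the lexer's token type):

    from collections import namedtuple

    Token = namedtuple('Token', ['type', 'value'])  # hypothetical stand-in

    print(numval(Token('INTEGER', '42')))   # 42
    print(numval(Token('FLOAT', '3.14')))   # 3.14
    print(numval(Token('NAME', 'x')))       # 'x' (returned unchanged)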
def resource(self, uri, methods=frozenset({'GET'}), **kwargs):
    def decorator(f):
        if kwargs.get('stream'):
            f.is_stream = kwargs['stream']
        self.add_resource(f, uri=uri, methods=methods, **kwargs)
    return decorator
Decorates a function to be registered as a resource route.

:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:param version:
:param name: user defined route name for url_for
:param filters: List of callable that will filter request and response data
:param validators: List of callable added to the filter list.
:return: A decorated function
def read_stdin(self):
    text = sys.stdin.read()
    if sys.version_info[0] < 3 and text is not None:
        text = text.decode(sys.stdin.encoding or 'utf-8')
    return text
Reads STDIN until the end of input and returns a unicode string.
def default(self, o):
    if isinstance(o, datetime):
        return o.isoformat()
    elif isinstance(o, Exception):
        return str(o)
    elif isinstance(o, numpy.integer):
        return int(o)
    else:
        return super(MetadataEncoder, self).default(o)
If o is a datetime object, convert it to an ISO string. If it is an
exception, convert it to a string. If it is a numpy int, coerce it to a
Python int.

:param o: the field to serialise
:returns: a string encoding of the field
def set_app_args(self, *args):
    if args:
        self._set('pyargv', ' '.join(args))
    return self._section
Sets ``sys.argv`` for python apps.

Examples:
    * pyargv="one two three" will set ``sys.argv`` to ``('one', 'two', 'three')``.

:param args:
def _class(self):
    try:
        self._project_name()
    except ValueError:
        return MalformedReq
    if self._is_satisfied():
        return SatisfiedReq
    if not self._expected_hashes():
        return MissingReq
    if self._actual_hash() not in self._expected_hashes():
        return MismatchedReq
    return InstallableReq
Return the class I should be, spanning a continuum of goodness.
def reply(self, text):
    data = {'text': text, 'vchannel_id': self['vchannel_id']}
    if self.is_p2p():
        data['type'] = RTMMessageType.P2PMessage
        data['to_uid'] = self['uid']
    else:
        data['type'] = RTMMessageType.ChannelMessage
        data['channel_id'] = self['channel_id']
    return RTMMessage(data)
Replies to a text message

Args:
    text(str): message content

Returns:
    RTMMessage
def from_pandas_dataframe(cls, bqm_df, offset=0.0, interactions=None):
    if interactions is None:
        interactions = []

    bqm = cls({}, {}, offset, Vartype.BINARY)

    for u, row in bqm_df.iterrows():
        for v, bias in row.iteritems():
            if u == v:
                bqm.add_variable(u, bias)
            elif bias:
                bqm.add_interaction(u, v, bias)

    for u, v in interactions:
        bqm.add_interaction(u, v, 0.0)

    return bqm
Create a binary quadratic model from a QUBO model formatted as a pandas
DataFrame.

Args:
    bqm_df (:class:`pandas.DataFrame`):
        Quadratic unconstrained binary optimization (QUBO) model formatted
        as a pandas DataFrame. Row and column indices label the QUBO
        variables; values are QUBO coefficients.

    offset (optional, default=0.0):
        Constant offset for the binary quadratic model.

    interactions (iterable, optional, default=[]):
        Any additional 0.0-bias interactions to be added to the binary
        quadratic model.

Returns:
    :class:`.BinaryQuadraticModel`: Binary quadratic model with vartype
    set to :class:`vartype.BINARY`.

Examples:
    This example creates a binary quadratic model from a QUBO in pandas
    DataFrame format while adding an interaction and setting a constant
    offset.

    >>> import dimod
    >>> import pandas as pd
    >>> pd_qubo = pd.DataFrame(data={0: [-1, 0], 1: [2, -1]})
    >>> pd_qubo
       0  1
    0 -1  2
    1  0 -1
    >>> model = dimod.BinaryQuadraticModel.from_pandas_dataframe(pd_qubo,
    ...         offset = 2.5,
    ...         interactions = {(0, 2), (1, 2)})
    >>> model.linear        # doctest: +SKIP
    {0: -1, 1: -1.0, 2: 0.0}
    >>> model.quadratic     # doctest: +SKIP
    {(0, 1): 2, (0, 2): 0.0, (1, 2): 0.0}
    >>> model.offset
    2.5
    >>> model.vartype
    <Vartype.BINARY: frozenset({0, 1})>
def async_update(self, event, reason={}):
    reason['attr'] = []
    for data in ['state', 'config']:
        changed_attr = self.update_attr(event.get(data, {}))
        reason[data] = data in event
        reason['attr'] += changed_attr
    super().async_update(event, reason)
New event for sensor.

Check if state or config is part of event. Signal that sensor has updated
attributes. Inform what attributes got changed values.
def from_binary(cls, pst, filename):
    m = Matrix.from_binary(filename)
    return ObservationEnsemble(data=m.x, pst=pst, index=m.row_names)
instantiate an observation ensemble from a jco-type file

Parameters
----------
pst : pyemu.Pst
    a Pst instance
filename : str
    the binary file name

Returns
-------
oe : ObservationEnsemble
def _stream(self, char):
    num = ord(char)
    if num in self.basic:
        self.dispatch(self.basic[num])
    elif num == ctrl.ESC:
        self.state = "escape"
    elif num == 0x00:
        pass
    else:
        self.dispatch("print", char)
Process a character when in the default 'stream' state.
def _FormatDateTime(self, event):
    try:
        datetime_object = datetime.datetime(
            1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
        datetime_object += datetime.timedelta(microseconds=event.timestamp)
        datetime_object.astimezone(self._output_mediator.timezone)
        return datetime_object.replace(tzinfo=None)
    except (OverflowError, ValueError) as exception:
        self._ReportEventError(event, (
            'unable to copy timestamp: {0!s} to a human readable date and '
            'time with error: {1!s}. Defaulting to: "ERROR"').format(
                event.timestamp, exception))
        return 'ERROR'
Formats the date to a datetime object without timezone information.

Note: timezone information must be removed due to lack of support by
xlsxwriter and Excel.

Args:
    event (EventObject): event.

Returns:
    datetime.datetime|str: date and time value or a string containing
    "ERROR" on OverflowError.
def list_media_services(access_token, subscription_id):
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/microsoft.media/mediaservices?api-version=',
                        MEDIA_API])
    return do_get(endpoint, access_token)
List the media services in a subscription.

Args:
    access_token (str): A valid Azure authentication token.
    subscription_id (str): Azure subscription id.

Returns:
    HTTP response. JSON body.
async def delete(self):
    if self.id == self._origin.Fabric._default_fabric_id:
        raise CannotDelete("Default fabric cannot be deleted.")
    await self._handler.delete(id=self.id)
Delete this Fabric.
def class_balancing_sampler(y, indices):
    weights = WeightedSampler.class_balancing_sample_weights(y[indices])
    return WeightedSubsetSampler(weights, indices=indices)
Construct a `WeightedSubsetSampler` that compensates for class imbalance.

Parameters
----------
y: NumPy array, 1D dtype=int
    sample classes, values must be 0 or positive
indices: NumPy array, 1D dtype=int
    An array of indices that identify the subset of samples drawn from
    data that are to be used

Returns
-------
WeightedSubsetSampler instance
    Sampler
def GetLoadedModuleBySuffix(path):
    root = os.path.splitext(path)[0]
    for module in sys.modules.values():
        mod_root = os.path.splitext(getattr(module, '__file__', None) or '')[0]
        if not mod_root:
            continue
        if not os.path.isabs(mod_root):
            mod_root = os.path.join(os.getcwd(), mod_root)
        if IsPathSuffix(mod_root, root):
            return module
    return None
Searches sys.modules to find a module with the given file path.

Args:
    path: Path to the source file. It can be relative or absolute, as
        suffix match can handle both. If absolute, it must have already
        been sanitized.

Algorithm:
    The given path must be a full suffix of a loaded module to be a valid
    match. File extensions are ignored when performing suffix match.

Example:
    path: 'a/b/c.py'
    modules: {'a': 'a.py', 'a.b': 'a/b.py', 'a.b.c': 'a/b/c.pyc'}
    returns: module('a.b.c')

Returns:
    The module that corresponds to path, or None if such module was not
    found.