<SYSTEM_TASK:> Decorator that makes a method cached. <END_TASK>
<USER_TASK:>
Description:
def _cached(f):
    """Decorator that makes a method cached."""
    attr_name = '_cached_' + f.__name__

    def wrapper(obj, *args, **kwargs):
        if not hasattr(obj, attr_name):
            setattr(obj, attr_name, f(obj, *args, **kwargs))
        return getattr(obj, attr_name)
    return wrapper
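A quick usage sketch (the Circle class and its area method are hypothetical, for illustration only): the wrapped method computes once per instance and replays the stored attribute on later calls, so arguments passed after the first call are ignored.

    class Circle(object):
        def __init__(self, radius):
            self.radius = radius

        @_cached
        def area(self):
            print("computing")  # runs only on the first call
            return 3.14159 * self.radius ** 2

    c = Circle(2.0)
    c.area()  # prints "computing", returns 12.56636
    c.area()  # returns 12.56636 straight from the _cached_area attribute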
<SYSTEM_TASK:> Keep only related model fields. <END_TASK>
<USER_TASK:>
Description:
def _filter_child_model_fields(cls, fields):
    """Keep only related model fields.

    Example: inherited models A -> B -> C.
    B has a one-to-many relationship to BMany.
    After inspection, BMany would have links to both B and C. Keep only B.
    Parent model A could not be used (it would not be in fields).

    :param list fields: model fields.
    :return list fields: filtered fields.
    """
    indexes_to_remove = set()
    for index1, field1 in enumerate(fields):
        for index2, field2 in enumerate(fields):
            if index1 < index2 and index1 not in indexes_to_remove and \
                    index2 not in indexes_to_remove:
                if issubclass(field1.related_model, field2.related_model):
                    indexes_to_remove.add(index1)
                if issubclass(field2.related_model, field1.related_model):
                    indexes_to_remove.add(index2)
    fields = [field for index, field in enumerate(fields)
              if index not in indexes_to_remove]
    return fields
<SYSTEM_TASK:> This endpoint appears to be required in order to keep pubnub updates flowing for some users. <END_TASK>
<USER_TASK:>
Description:
def post_session():
    """This endpoint appears to be required in order to keep pubnub updates
    flowing for some users. It just posts a random nonce to the
    /users/me/session endpoint and returns the result.
    """
    url_string = "{}/users/me/session".format(WinkApiInterface.BASE_URL)
    nonce = ''.join([str(random.randint(0, 9)) for _ in range(9)])
    _json = {"nonce": str(nonce)}
    try:
        arequest = requests.post(url_string,
                                 data=json.dumps(_json),
                                 headers=API_HEADERS)
        response_json = arequest.json()
        return response_json
    except requests.exceptions.RequestException:
        return None
<SYSTEM_TASK:> Set device state via online API. <END_TASK>
<USER_TASK:>
Description:
def set_device_state(self, device, state, id_override=None, type_override=None):
    """Set device state via online API.

    Args:
        device (WinkDevice): The device the change is being requested for.
        state (Dict): The state being requested.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    _LOGGER.info("Setting state via online API")
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = "{}/{}s/{}".format(self.BASE_URL, object_type, object_id)
    if state is None or object_type == "group":
        url_string += "/activate"
        if state is None:
            arequest = requests.post(url_string, headers=API_HEADERS)
        else:
            arequest = requests.post(url_string,
                                     data=json.dumps(state),
                                     headers=API_HEADERS)
    else:
        arequest = requests.put(url_string,
                                data=json.dumps(state),
                                headers=API_HEADERS)
    if arequest.status_code == 401:
        new_token = refresh_access_token()
        if new_token:
            arequest = requests.put(url_string,
                                    data=json.dumps(state),
                                    headers=API_HEADERS)
        else:
            raise WinkAPIException("Failed to refresh access token.")
    response_json = arequest.json()
    _LOGGER.debug('%s', response_json)
    return response_json
<SYSTEM_TASK:> Set device state via local API, and fall back to online API. <END_TASK>
<USER_TASK:>
Description:
def local_set_state(self, device, state, id_override=None, type_override=None):
    """Set device state via local API, and fall back to online API.

    Args:
        device (WinkDevice): The device the change is being requested for.
        state (Dict): The state being requested.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    if ALLOW_LOCAL_CONTROL:
        if device.local_id() is not None:
            hub = HUBS.get(device.hub_id())
            if hub is None or hub["token"] is None:
                return self.set_device_state(device, state, id_override,
                                             type_override)
        else:
            return self.set_device_state(device, state, id_override,
                                         type_override)
        _LOGGER.info("Setting local state")
        local_id = id_override or device.local_id().split(".")[0]
        object_type = type_override or device.object_type()
        LOCAL_API_HEADERS['Authorization'] = "Bearer " + hub["token"]
        url_string = "https://{}:8888/{}s/{}".format(hub["ip"],
                                                     object_type,
                                                     local_id)
        try:
            arequest = requests.put(url_string,
                                    data=json.dumps(state),
                                    headers=LOCAL_API_HEADERS,
                                    verify=False, timeout=3)
        except requests.exceptions.RequestException:
            _LOGGER.error("Error sending local control request. "
                          "Sending request online")
            return self.set_device_state(device, state, id_override,
                                         type_override)
        response_json = arequest.json()
        _LOGGER.debug('%s', response_json)
        temp_state = device.json_state
        for key, value in response_json["data"]["last_reading"].items():
            temp_state["last_reading"][key] = value
        return temp_state
    else:
        return self.set_device_state(device, state, id_override, type_override)
<SYSTEM_TASK:> Get device state via online API. <END_TASK>
<USER_TASK:>
Description:
def get_device_state(self, device, id_override=None, type_override=None):
    """Get device state via online API.

    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    _LOGGER.info("Getting state via online API")
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = "{}/{}s/{}".format(self.BASE_URL, object_type, object_id)
    arequest = requests.get(url_string, headers=API_HEADERS)
    response_json = arequest.json()
    _LOGGER.debug('%s', response_json)
    return response_json
<SYSTEM_TASK:> Get device state via local API, and fall back to online API. <END_TASK>
<USER_TASK:>
Description:
def local_get_state(self, device, id_override=None, type_override=None):
    """Get device state via local API, and fall back to online API.

    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    if ALLOW_LOCAL_CONTROL:
        if device.local_id() is not None:
            hub = HUBS.get(device.hub_id())
            if hub is not None and hub["token"] is not None:
                ip = hub["ip"]
                access_token = hub["token"]
            else:
                return self.get_device_state(device, id_override,
                                             type_override)
        else:
            return self.get_device_state(device, id_override, type_override)
        _LOGGER.info("Getting local state")
        local_id = id_override or device.local_id()
        object_type = type_override or device.object_type()
        LOCAL_API_HEADERS['Authorization'] = "Bearer " + access_token
        url_string = "https://{}:8888/{}s/{}".format(ip, object_type,
                                                     local_id)
        try:
            arequest = requests.get(url_string,
                                    headers=LOCAL_API_HEADERS,
                                    verify=False, timeout=3)
        except requests.exceptions.RequestException:
            _LOGGER.error("Error sending local control request. "
                          "Sending request online")
            return self.get_device_state(device, id_override, type_override)
        response_json = arequest.json()
        _LOGGER.debug('%s', response_json)
        temp_state = device.json_state
        for key, value in response_json["data"]["last_reading"].items():
            temp_state["last_reading"][key] = value
        return temp_state
    else:
        return self.get_device_state(device, id_override, type_override)
<SYSTEM_TASK:> Make a call to the update_firmware endpoint. As far as I know this is only valid for Wink hubs. <END_TASK>
<USER_TASK:>
Description:
def update_firmware(self, device, id_override=None, type_override=None):
    """Make a call to the update_firmware endpoint. As far as I know this
    is only valid for Wink hubs.

    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = "{}/{}s/{}/update_firmware".format(self.BASE_URL,
                                                    object_type,
                                                    object_id)
    try:
        arequest = requests.post(url_string, headers=API_HEADERS)
        response_json = arequest.json()
        return response_json
    except requests.exceptions.RequestException:
        return None
<SYSTEM_TASK:> Remove a device. <END_TASK>
<USER_TASK:>
Description:
def remove_device(self, device, id_override=None, type_override=None):
    """Remove a device.

    Args:
        device (WinkDevice): The device the change is being requested for.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        (boolean): True if the device was removed.
    """
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = "{}/{}s/{}".format(self.BASE_URL, object_type, object_id)
    try:
        arequest = requests.delete(url_string, headers=API_HEADERS)
        if arequest.status_code == 204:
            return True
        _LOGGER.error("Failed to remove device. Status code: %s",
                      arequest.status_code)
        return False
    except requests.exceptions.RequestException:
        _LOGGER.error("Failed to remove device.")
        return False
<SYSTEM_TASK:> Create a new lock key code. <END_TASK>
<USER_TASK:>
Description:
def create_lock_key(self, device, new_device_json, id_override=None,
                    type_override=None):
    """Create a new lock key code.

    Args:
        device (WinkDevice): The device the change is being requested for.
        new_device_json (String): The JSON string required to create the
            device.
        id_override (String, optional): A device ID used to override the
            passed in device's ID. Used to make changes on sub-devices,
            i.e. Outlet in a Powerstrip. The Parent device's ID.
        type_override (String, optional): Used to override the device type
            when a device inherits from a device other than WinkDevice.

    Returns:
        response_json (Dict): The API's response in dictionary format
    """
    object_id = id_override or device.object_id()
    object_type = type_override or device.object_type()
    url_string = "{}/{}s/{}/keys".format(self.BASE_URL, object_type,
                                         object_id)
    try:
        arequest = requests.post(url_string,
                                 data=json.dumps(new_device_json),
                                 headers=API_HEADERS)
        response_json = arequest.json()
        return response_json
    except requests.exceptions.RequestException:
        return None
<SYSTEM_TASK:> Get model defined in Meta. <END_TASK>
<USER_TASK:>
Description:
def get_concrete_model(model):
    """Get model defined in Meta.

    :param str or django.db.models.Model model:
    :return: model or None
    :rtype django.db.models.Model or None:
    :raise ValueError: model is not found or abstract
    """
    if not (inspect.isclass(model) and issubclass(model, models.Model)):
        model = get_model_by_name(model)
    return model
<SYSTEM_TASK:> Merge meta parameters. <END_TASK>
<USER_TASK:>
Description:
def merge_metas(*metas):
    """Merge meta parameters.

    Each following meta has priority over the current result; it will
    overwrite attributes.

    :param class or None meta: class with properties.
    :return class: merged meta.
    """
    metadict = {}
    for meta in metas:
        metadict.update(meta.__dict__)
    metadict = {k: v for k, v in metadict.items() if not k.startswith('__')}
    return type('Meta', (object,), metadict)
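A short usage sketch (the meta classes are illustrative): later metas win attribute conflicts, everything else is carried through.

    class MetaA:
        ordering = ('id',)
        page_size = 10

    class MetaB:
        page_size = 50

    Meta = merge_metas(MetaA, MetaB)
    assert Meta.page_size == 50       # the later meta wins
    assert Meta.ordering == ('id',)   # carried over from MetaA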
<SYSTEM_TASK:> Activate the scene. <END_TASK>
<USER_TASK:>
Description:
def activate(self):
    """Activate the scene."""
    response = self.api_interface.set_device_state(self, None)
    self._update_state_from_response(response)
<SYSTEM_TASK:> Get model by its name. <END_TASK>
<USER_TASK:>
Description:
def get_model_by_name(model_name):
    """Get model by its name.

    :param str model_name: name of model.
    :return django.db.models.Model:

    Example:
        get_model_by_name('auth.User')
        django.contrib.auth.models.User
    """
    if isinstance(model_name, six.string_types) and \
            len(model_name.split('.')) == 2:
        app_name, model_name = model_name.split('.')
        if django.VERSION[:2] < (1, 8):
            model = models.get_model(app_name, model_name)
        else:
            from django.apps import apps
            model = apps.get_model(app_name, model_name)
    else:
        raise ValueError("{0} is not a Django model".format(model_name))
    return model
<SYSTEM_TASK:> Get model name for the field. <END_TASK>
<USER_TASK:>
Description:
def get_model_name(model):
    """Get model name for the field.

    Django 1.5 uses module_name, does not support model_name.
    Django 1.6 uses module_name and model_name.
    Django 1.7 uses model_name; module_name raises
    RemovedInDjango18Warning.
    """
    opts = model._meta
    if django.VERSION[:2] < (1, 7):
        model_name = opts.module_name
    else:
        model_name = opts.model_name
    return model_name
<SYSTEM_TASK:> Clear django cache for models. <END_TASK>
<USER_TASK:>
Description:
def clear_app_cache(app_name):
    """Clear django cache for models.

    :param str app_name: name of application to clear model cache
    """
    loading_cache = django.db.models.loading.cache
    if django.VERSION[:2] < (1, 7):
        loading_cache.app_models[app_name].clear()
    else:
        loading_cache.all_models[app_name].clear()
<SYSTEM_TASK:> Translator from namedtuple config representation to the sbc_t type. <END_TASK>
<USER_TASK:>
Description:
def _init_sbc_config(self, config):
    """Translator from namedtuple config representation to
    the sbc_t type.

    :param namedtuple config: See :py:class:`.SBCCodecConfig`
    :returns:
    """
    if config.channel_mode == SBCChannelMode.CHANNEL_MODE_MONO:
        self.config.mode = self.codec.SBC_MODE_MONO
    elif config.channel_mode == SBCChannelMode.CHANNEL_MODE_STEREO:
        self.config.mode = self.codec.SBC_MODE_STEREO
    elif config.channel_mode == SBCChannelMode.CHANNEL_MODE_DUAL:
        self.config.mode = self.codec.SBC_MODE_DUAL_CHANNEL
    elif config.channel_mode == SBCChannelMode.CHANNEL_MODE_JOINT_STEREO:
        self.config.mode = self.codec.SBC_MODE_JOINT_STEREO

    if config.frequency == SBCSamplingFrequency.FREQ_16KHZ:
        self.config.frequency = self.codec.SBC_FREQ_16000
    elif config.frequency == SBCSamplingFrequency.FREQ_32KHZ:
        self.config.frequency = self.codec.SBC_FREQ_32000
    elif config.frequency == SBCSamplingFrequency.FREQ_44_1KHZ:
        self.config.frequency = self.codec.SBC_FREQ_44100
    elif config.frequency == SBCSamplingFrequency.FREQ_48KHZ:
        self.config.frequency = self.codec.SBC_FREQ_48000

    if config.allocation_method == SBCAllocationMethod.LOUDNESS:
        self.config.allocation = self.codec.SBC_AM_LOUDNESS
    elif config.allocation_method == SBCAllocationMethod.SNR:
        self.config.allocation = self.codec.SBC_AM_SNR

    if config.subbands == SBCSubbands.SUBBANDS_4:
        self.config.subbands = self.codec.SBC_SB_4
    elif config.subbands == SBCSubbands.SUBBANDS_8:
        self.config.subbands = self.codec.SBC_SB_8

    if config.block_length == SBCBlocks.BLOCKS_4:
        self.config.blocks = self.codec.SBC_BLK_4
    elif config.block_length == SBCBlocks.BLOCKS_8:
        self.config.blocks = self.codec.SBC_BLK_8
    elif config.block_length == SBCBlocks.BLOCKS_12:
        self.config.blocks = self.codec.SBC_BLK_12
    elif config.block_length == SBCBlocks.BLOCKS_16:
        self.config.blocks = self.codec.SBC_BLK_16

    self.config.bitpool = config.max_bitpool
    self.config.endian = self.codec.SBC_LE
<SYSTEM_TASK:> Read the media transport descriptor, depay the RTP payload and decode the SBC frames into a byte array. <END_TASK>
<USER_TASK:>
Description:
def decode(self, fd, mtu, max_len=2560):
    """Read the media transport descriptor, depay
    the RTP payload and decode the SBC frames into a byte array.
    The maximum number of bytes to be returned may be passed as an
    argument and all available bytes are returned to the caller.

    :param int fd: Media transport file descriptor
    :param int mtu: Media transport MTU size as returned when the
        media transport was acquired.
    :param int max_len: Optional. Set maximum number of bytes to read.
    :return data: Decoded data bytes as an array.
    :rtype: array{byte}
    """
    output_buffer = ffi.new('char[]', max_len)
    sz = self.codec.rtp_sbc_decode_from_fd(self.config,
                                           output_buffer,
                                           max_len,
                                           mtu,
                                           fd)
    return ffi.buffer(output_buffer[0:sz])
<SYSTEM_TASK:> Wrapper for calling user callback routine to notify when transport data is ready to read. <END_TASK>
<USER_TASK:>
Description:
def _transport_ready_handler(self, fd, cb_condition):
    """Wrapper for calling user callback routine to notify
    when transport data is ready to read.
    """
    if self.user_cb:
        self.user_cb(self.user_arg)
    return True
<SYSTEM_TASK:> Read data from media transport. <END_TASK>
<USER_TASK:>
Description:
def read_transport(self):
    """Read data from media transport.
    The returned data payload is SBC decoded and has
    all RTP encapsulation removed.

    :return data: Payload data that has been decoded,
        with RTP encapsulation removed.
    :rtype: array{byte}
    """
    if 'r' not in self.access_type:
        raise BTIncompatibleTransportAccessType
    return self.codec.decode(self.fd, self.read_mtu)
<SYSTEM_TASK:> Write data to media transport. The data is encoded using the SBC codec and RTP encapsulated before being written. <END_TASK>
<USER_TASK:>
Description:
def write_transport(self, data):
    """Write data to media transport. The data is
    encoded using the SBC codec and RTP encapsulated before
    being written to the transport file descriptor.

    :param array{byte} data: Payload data to encode,
        encapsulate and send.
    """
    if 'w' not in self.access_type:
        raise BTIncompatibleTransportAccessType
    return self.codec.encode(self.fd, self.write_mtu, data)
<SYSTEM_TASK:> Forcibly close previously acquired media transport. <END_TASK>
<USER_TASK:>
Description:
def close_transport(self):
    """Forcibly close previously acquired media transport.

    .. note:: The user should make sure any transport
        event handlers are unregistered first.
    """
    if self.path:
        self._release_media_transport(self.path, self.access_type)
        self.path = None
<SYSTEM_TASK:> Should be called by subclass when it is ready to acquire the media transport file descriptor. <END_TASK>
<USER_TASK:>
Description:
def _acquire_media_transport(self, path, access_type):
    """Should be called by subclass when it is ready
    to acquire the media transport file descriptor.
    """
    transport = BTMediaTransport(path=path)
    (fd, read_mtu, write_mtu) = transport.acquire(access_type)
    self.fd = fd.take()  # We must do the clean-up later
    self.write_mtu = write_mtu
    self.read_mtu = read_mtu
    self.access_type = access_type
    self.path = path
    self._install_transport_ready()
<SYSTEM_TASK:> Should be called by subclass when it is finished with the media transport file descriptor. <END_TASK>
<USER_TASK:>
Description:
def _release_media_transport(self, path, access_type):
    """Should be called by subclass when it is finished
    with the media transport file descriptor.
    """
    try:
        self._uninstall_transport_ready()
        os.close(self.fd)  # Clean-up previously taken fd
        transport = BTMediaTransport(path=path)
        transport.release(access_type)
    except:
        pass
<SYSTEM_TASK:> Helper to turn SBC codec configuration params into an a2dp_sbc_t structure usable by bluez. <END_TASK>
<USER_TASK:>
Description:
def _make_config(config):
    """Helper to turn SBC codec configuration params into
    an a2dp_sbc_t structure usable by bluez."""
    # The SBC config encoding is taken from a2dp_codecs.h; in particular,
    # the a2dp_sbc_t type is converted into a 4-byte array:
    #    uint8_t channel_mode:4
    #    uint8_t frequency:4
    #    uint8_t allocation_method:2
    #    uint8_t subbands:2
    #    uint8_t block_length:4
    #    uint8_t min_bitpool
    #    uint8_t max_bitpool
    return dbus.Array([dbus.Byte(config.channel_mode |
                                 (config.frequency << 4)),
                       dbus.Byte(config.allocation_method |
                                 (config.subbands << 2) |
                                 (config.block_length << 4)),
                       dbus.Byte(config.min_bitpool),
                       dbus.Byte(config.max_bitpool)])
<SYSTEM_TASK:> Helper to turn a2dp_sbc_t structure into a more usable set of SBC codec configuration params. <END_TASK>
<USER_TASK:>
Description:
def _parse_config(config):
    """Helper to turn a2dp_sbc_t structure into a
    more usable set of SBC codec configuration params."""
    frequency = config[0] >> 4
    channel_mode = config[0] & 0xF
    allocation_method = config[1] & 0x03
    subbands = (config[1] >> 2) & 0x03
    block_length = (config[1] >> 4) & 0x0F
    min_bitpool = config[2]
    max_bitpool = config[3]
    return SBCCodecConfig(channel_mode, frequency, allocation_method,
                          subbands, block_length, min_bitpool, max_bitpool)
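The two helpers are inverses over the packed four-byte layout, as long as each field fits its bit width (4/4 bits in byte 0, then 2/2/4 bits in byte 1, then the two bitpool bytes). A round-trip sketch with illustrative field values:

    cfg = SBCCodecConfig(channel_mode=0x2, frequency=0x8,
                         allocation_method=0x1, subbands=0x1,
                         block_length=0x8, min_bitpool=2, max_bitpool=53)
    packed = _make_config(cfg)                      # dbus.Array of 4 bytes
    assert _parse_config([int(b) for b in packed]) == cfg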
<SYSTEM_TASK:> Add a new user key code. <END_TASK>
<USER_TASK:>
Description:
def add_new_key(self, code, name):
    """Add a new user key code."""
    device_json = {"code": code, "name": name}
    return self.api_interface.create_lock_key(self, device_json)
<SYSTEM_TASK:> Creates a new object path for a remote device. This method will connect to the remote device, retrieve all SDP records and then initiate the pairing. <END_TASK>
<USER_TASK:>
Description:
def create_paired_device(self, dev_id, agent_path,
                         capability, cb_notify_device, cb_notify_error):
    """Creates a new object path for a remote device. This
    method will connect to the remote device and retrieve
    all SDP records and then initiate the pairing.

    If a previously :py:meth:`create_device` was used successfully,
    this method will only initiate the pairing.

    Compared to :py:meth:`create_device` this method will fail if
    the pairing already exists, but not if the object path already
    has been created. This allows applications to use
    :py:meth:`create_device` first and then, if needed, use
    :py:meth:`create_paired_device` to initiate pairing.

    The agent object path is assumed to reside within the process
    (D-Bus connection instance) that calls this method. No separate
    registration procedure is needed for it and it gets automatically
    released once the pairing operation is complete.

    :param str dev_id: MAC address of the new device to create,
        e.g., '11:22:33:44:55:66'
    :param str agent_path: Path used when creating the bluetooth
        agent, e.g., '/test/agent'
    :param str capability: Pairing agent capability,
        e.g., 'DisplayYesNo', etc
    :param func cb_notify_device: Callback on success. The callback
        is called with the new device's object path as an argument.
    :param func cb_notify_error: Callback on error. The callback is
        called with the error reason.
    :return:
    :raises dbus.Exception: org.bluez.Error.InvalidArguments
    :raises dbus.Exception: org.bluez.Error.Failed
    """
    return self._interface.CreatePairedDevice(dev_id,
                                              agent_path,
                                              capability,
                                              reply_handler=cb_notify_device,  # noqa
                                              error_handler=cb_notify_error)
<SYSTEM_TASK:> Traverse the body of the node manually. <END_TASK>
<USER_TASK:>
Description:
def _visit_body(self, node):
    """Traverse the body of the node manually.

    If the first node is an expression which contains a string or bytes,
    it marks that as a docstring.
    """
    if (node.body and isinstance(node.body[0], ast.Expr) and
            self.is_base_string(node.body[0].value)):
        node.body[0].value.is_docstring = True
        self.visit(node.body[0].value)

    for sub_node in node.body:
        self.visit(sub_node)
<SYSTEM_TASK:> Lookup BuiltWith results for the given domain. If API version 2 is used and the get_last_full_query flag is enabled, it also queries for the date of the last full BuiltWith scan. <END_TASK>
<USER_TASK:>
Description:
def lookup(self, domain, get_last_full_query=True):
    """Lookup BuiltWith results for the given domain. If API version 2 is
    used and the get_last_full_query flag is enabled, it also queries for
    the date of the last full BuiltWith scan.
    """
    last_full_builtwith_scan_date = None
    if self.api_version == 7 and isinstance(domain, list):
        domain = ','.join(domain)
    if self.api_version in [2, 7]:
        last_updates_resp = requests.get(
            ENDPOINTS_BY_API_VERSION[self.api_version],
            params={'UPDATE': 1})
        last_updated_data = last_updates_resp.json()
        if get_last_full_query and last_updated_data['FULL']:
            last_full_builtwith_scan_date = datetime.datetime.strptime(
                last_updated_data['FULL'], '%Y-%m-%d').date()

    params = {
        'KEY': self.key,
        'LOOKUP': domain,
    }

    response = requests.get(ENDPOINTS_BY_API_VERSION[self.api_version],
                            params=params)

    if self.api_version == 1:
        return response.json()
    elif self.api_version == 2:
        return BuiltWithDomainInfo(response.json(),
                                   last_full_builtwith_scan_date)
    elif self.api_version == 7:
        domain_info = list()
        for result in response.json()['Results']:
            domain_info.append(
                BuiltWithDomainInfo(result['Result'],
                                    last_full_builtwith_scan_date))
        return domain_info
<SYSTEM_TASK:> Get all of the api endpoints. <END_TASK>
<USER_TASK:>
Description:
def urls(self):
    """Get all of the api endpoints.

    NOTE: only for django as of now.
    NOTE: urlpatterns are deprecated since Django 1.8

    :return list: urls
    """
    from django.conf.urls import url
    urls = [
        url(r'^$', self.documentation),
        url(r'^map$', self.map_view),
    ]
    for resource_name in self.resource_map:
        urls.extend([
            url(r'(?P<resource_name>{})$'.format(resource_name),
                self.handler_view),
            url(r'(?P<resource_name>{})/(?P<ids>[\w\-\,]+)$'.format(
                resource_name), self.handler_view),
        ])
    return urls
<SYSTEM_TASK:> Applies the Caesar shift cipher. <END_TASK>
<USER_TASK:>
Description:
def cipher(self):
    """Applies the Caesar shift cipher.

    Based on the attributes of the object, applies the Caesar shift
    cipher to the message attribute. Accepts positive and negative
    integers as offsets.

    Required attributes:
        message
        offset

    Returns:
        String with cipher applied.
    """
    # If no offset is selected, pick a random one with sufficient
    # distance from the original.
    if self.offset is False:
        self.offset = randrange(5, 25)
        logging.info("Random offset selected: {0}".format(self.offset))
    logging.debug("Offset set: {0}".format(self.offset))

    # Cipher
    ciphered_message_list = list(self.message)
    for i, letter in enumerate(ciphered_message_list):
        if letter.isalpha():
            # Use default upper and lower case characters if alphabet
            # not supplied by user.
            if letter.isupper():
                alphabet = [character.upper()
                            for character in self.alphabet]
            else:
                alphabet = self.alphabet

            logging.debug("Letter: {0}".format(letter))
            logging.debug("Alphabet: {0}".format(alphabet))
            value = alphabet.index(letter)
            cipher_value = value + self.offset
            if cipher_value > 25 or cipher_value < 0:
                cipher_value = cipher_value % 26
            logging.debug("Cipher value: {0}".format(cipher_value))
            ciphered_message_list[i] = alphabet[cipher_value]
            logging.debug("Ciphered letter: {0}".format(letter))
    self.message = ''.join(ciphered_message_list)
    return self.message
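A worked example of the wrap-around arithmetic: with offset 3, 'y' sits at index 24 and (24 + 3) % 26 = 1 maps it to 'b', so "xyz" becomes "abc". Sketch of expected use (the class name and constructor are illustrative; only the message and offset attributes are assumed from the code above):

    caesar = CaesarCipher(message="xyz", offset=3)
    caesar.cipher()   # -> "abc"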
<SYSTEM_TASK:> Calculates the entropy of a string based on known frequency of English letters. <END_TASK>
<USER_TASK:>
Description:
def calculate_entropy(self, entropy_string):
    """Calculates the entropy of a string based on known frequency of
    English letters.

    Args:
        entropy_string: A str representing the string to calculate.

    Returns:
        A float with the total entropy of the string (lower means
        closer to typical English letter frequencies).
    """
    total = 0
    for char in entropy_string:
        if char.isalpha():
            prob = self.frequency[char.lower()]
            total += -math.log(prob) / math.log(2)
    logging.debug("Entropy score: {0}".format(total))
    return total
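A worked instance of the scoring: with the typical English frequency of 'e' at about 0.127, each 'e' contributes -log2(0.127) ≈ 2.98, so calculate_entropy("ee") ≈ 5.95, while a rare letter such as 'z' (frequency ≈ 0.00074) contributes ≈ 10.4 on its own. Lower totals therefore indicate text built from common English letters, which is exactly what cracked() selects for below.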
<SYSTEM_TASK:> Attempts to crack ciphertext using frequency of letters in English. <END_TASK>
<USER_TASK:>
Description:
def cracked(self):
    """Attempts to crack ciphertext using frequency of letters in English.

    Returns:
        String of most likely message.
    """
    logging.info("Cracking message: {0}".format(self.message))
    entropy_values = {}
    attempt_cache = {}
    message = self.message
    for i in range(25):
        self.message = message
        self.offset = i * -1
        logging.debug("Attempting crack with offset: "
                      "{0}".format(self.offset))
        test_cipher = self.cipher()
        logging.debug("Attempting plaintext: {0}".format(test_cipher))
        entropy_values[i] = self.calculate_entropy(test_cipher)
        attempt_cache[i] = test_cipher

    sorted_by_entropy = sorted(entropy_values, key=entropy_values.get)
    self.offset = sorted_by_entropy[0] * -1
    cracked_text = attempt_cache[sorted_by_entropy[0]]
    self.message = cracked_text

    logging.debug("Entropy scores: {0}".format(entropy_values))
    logging.debug("Lowest entropy score: "
                  "{0}".format(str(entropy_values[sorted_by_entropy[0]])))
    logging.debug("Most likely offset: {0}".format(self.offset))
    logging.debug("Most likely message: {0}".format(cracked_text))

    return cracked_text
<SYSTEM_TASK:> Decodes message using Caesar shift cipher. <END_TASK>
<USER_TASK:>
Description:
def decoded(self):
    """Decodes message using Caesar shift cipher.

    Inverse operation of encoding; applies negative offset to Caesar
    shift cipher.

    Returns:
        String decoded with cipher.
    """
    logging.info("Decoding message: {0}".format(self.message))
    self.offset = self.offset * -1
    return self.cipher()
<SYSTEM_TASK:> Parse querydict data. <END_TASK>
<USER_TASK:>
Description:
def parse(cls, querydict):
    """Parse querydict data.

    The expected arguments are:
    distinct, fields, filter, include, page, sort

    Parameters
    ----------
    querydict : django.http.request.QueryDict
        MultiValueDict with query arguments.

    Returns
    -------
    result : dict
        dictionary in format {key: value}.

    Raises
    ------
    ValueError
        If args contain an unknown key.
    """
    for key in querydict.keys():
        if not any((key in JSONAPIQueryDict._fields,
                    cls.RE_FIELDS.match(key))):
            msg = "Query parameter {} is not known".format(key)
            raise ValueError(msg)
    result = JSONAPIQueryDict(
        distinct=cls.prepare_values(querydict.getlist('distinct')),
        fields=cls.parse_fields(querydict),
        filter=querydict.getlist('filter'),
        include=cls.prepare_values(querydict.getlist('include')),
        page=int(querydict.get('page')) if querydict.get('page') else None,
        sort=cls.prepare_values(querydict.getlist('sort'))
    )
    return result
<SYSTEM_TASK:> Search all of the capabilities of the device and return the supported binary state field. <END_TASK>
<USER_TASK:>
Description:
def binary_state_name(self):
    """Search all of the capabilities of the device and return the
    supported binary state field. Default to returning "powered".
    """
    return_field = "powered"
    _capabilities = self.json_state.get('capabilities')
    if _capabilities is not None:
        _fields = _capabilities.get('fields')
        if _fields is not None:
            for field in _fields:
                if field.get('field') in SUPPORTED_BINARY_STATE_FIELDS:
                    return_field = field.get('field')
    return return_field
<SYSTEM_TASK:> Returns the net flux production for all states. <END_TASK>
<USER_TASK:>
Description:
def flux_production(F):
    r"""Returns the net flux production for all states.

    Parameters
    ----------
    F : (n, n) ndarray
        Matrix of flux values between pairs of states.

    Returns
    -------
    prod : (n) ndarray
        array with flux production (positive) or consumption (negative)
        at each state
    """
    influxes = np.array(np.sum(F, axis=0)).flatten()   # all that flows in
    outfluxes = np.array(np.sum(F, axis=1)).flatten()  # all that flows out
    prod = outfluxes - influxes  # net flux out of each node (production)
    return prod
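A tiny worked example: for F = [[0, 3], [1, 0]], state 0 sends 3 and receives 1, so state 0 is a net source and state 1 a net sink.

    import numpy as np

    F = np.array([[0.0, 3.0],
                  [1.0, 0.0]])
    flux_production(F)   # -> array([ 2., -2.])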
<SYSTEM_TASK:> Compute the total flux, or turnover flux, that is produced by the flux sources and consumed by the flux sinks. <END_TASK>
<USER_TASK:>
Description:
def total_flux(F, A=None):
    r"""Compute the total flux, or turnover flux, that is produced by
    the flux sources and consumed by the flux sinks.

    Parameters
    ----------
    F : (n, n) ndarray
        Matrix of flux values between pairs of states.
    A : array_like (optional)
        List of integer state labels for set A (reactant)

    Returns
    -------
    F : float
        The total flux, or turnover flux, that is produced by the
        flux sources and consumed by the flux sinks
    """
    if A is None:
        prod = flux_production(F)
        zeros = np.zeros(len(prod))
        outflux = np.sum(np.maximum(prod, zeros))
        return outflux
    else:
        X = set(np.arange(F.shape[0]))  # total state space
        A = set(A)
        notA = X.difference(A)
        outflux = (F[list(A), :])[:, list(notA)].sum()
        return outflux
<SYSTEM_TASK:> Add the initialization lines to the journal. <END_TASK>
<USER_TASK:>
Description:
def _init_journal(self, permissive=True):
    """Add the initialization lines to the journal.

    By default adds the JrnObj variable and timestamp to the journal
    contents.

    Args:
        permissive (bool): if True most errors in the journal will not
            cause Revit to stop journal execution. Some still do.
    """
    nowstamp = datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")[:-3]
    self._add_entry(templates.INIT.format(time_stamp=nowstamp))
    if permissive:
        self._add_entry(templates.INIT_DEBUG)
<SYSTEM_TASK:> Append a new file from .rft entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def _new_from_rft(self, base_template, rft_file):
    """Append a new file from .rft entry to the journal.

    This instructs Revit to create a new model based on the
    provided .rft template.

    Args:
        base_template (str): new file journal template from rmj.templates
        rft_file (str): full path to .rft template to be used
    """
    self._add_entry(base_template)
    self._add_entry(templates.NEW_FROM_RFT
                    .format(rft_file_path=rft_file,
                            rft_file_name=op.basename(rft_file)))
<SYSTEM_TASK:> Append a new model from .rft entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def new_model(self, template_name='<None>'):
    """Append a new model from .rft entry to the journal.

    This instructs Revit to create a new model based on the
    provided .rft template.

    Args:
        template_name (str): optional full path to .rft template
            to be used. default value is <None>
    """
    self._add_entry(templates.NEW_MODEL
                    .format(template_name=template_name))
<SYSTEM_TASK:> Append a new template from .rft entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def new_template(self, template_name='<None>'):
    """Append a new template from .rft entry to the journal.

    This instructs Revit to create a new template model based on the
    provided .rft template.

    Args:
        template_name (str): optional full path to .rft template
            to be used. default value is <None>
    """
    self._add_entry(templates.NEW_MODEL_TEMPLATE
                    .format(template_name=template_name))
<SYSTEM_TASK:> Append an open workshared model entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def open_workshared_model(self, model_path, central=False, detached=False,
                          keep_worksets=True, audit=False,
                          show_workset_config=1):
    """Append an open workshared model entry to the journal.

    This instructs Revit to open a workshared model.

    Args:
        model_path (str): full path to workshared model
        central (bool): if True opens central model and not local
        detached (bool): if True opens a detached model
        keep_worksets (bool): if True keeps worksets when detaching
        audit (bool): if True audits the model when opening
        show_workset_config (int): workset configuration option passed
            into the journal template (default is 1)
    """
    if detached:
        if audit:
            if keep_worksets:
                self._add_entry(
                    templates.CENTRAL_OPEN_DETACH_AUDIT
                    .format(model_path=model_path,
                            workset_config=show_workset_config)
                )
            else:
                self._add_entry(
                    templates.CENTRAL_OPEN_DETACH_AUDIT_DISCARD
                    .format(model_path=model_path,
                            workset_config=show_workset_config)
                )
        else:
            if keep_worksets:
                self._add_entry(
                    templates.CENTRAL_OPEN_DETACH
                    .format(model_path=model_path,
                            workset_config=show_workset_config)
                )
            else:
                self._add_entry(
                    templates.CENTRAL_OPEN_DETACH_DISCARD
                    .format(model_path=model_path,
                            workset_config=show_workset_config)
                )
    elif central:
        if audit:
            self._add_entry(
                templates.CENTRAL_OPEN_AUDIT
                .format(model_path=model_path,
                        workset_config=show_workset_config)
            )
        else:
            self._add_entry(
                templates.CENTRAL_OPEN
                .format(model_path=model_path,
                        workset_config=show_workset_config)
            )
    else:
        if audit:
            self._add_entry(
                templates.WORKSHARED_OPEN_AUDIT
                .format(model_path=model_path,
                        workset_config=show_workset_config)
            )
        else:
            self._add_entry(
                templates.WORKSHARED_OPEN
                .format(model_path=model_path,
                        workset_config=show_workset_config)
            )
<SYSTEM_TASK:> Append an open non-workshared model entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def open_model(self, model_path, audit=False):
    """Append an open non-workshared model entry to the journal.

    This instructs Revit to open a non-workshared model.

    Args:
        model_path (str): full path to non-workshared model
        audit (bool): if True audits the model when opening
    """
    if audit:
        self._add_entry(templates.FILE_OPEN_AUDIT
                        .format(model_path=model_path))
    else:
        self._add_entry(templates.FILE_OPEN
                        .format(model_path=model_path))
<SYSTEM_TASK:> Append an execute external command entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def execute_command(self, tab_name, panel_name,
                    command_module, command_class, command_data=None):
    """Append an execute external command entry to the journal.

    This instructs Revit to execute the provided command from the
    provided module, tab, and panel.

    Args:
        tab_name (str): name of ribbon tab that contains the command
        panel_name (str): name of ribbon panel that contains the command
        command_module (str): name of module that provides the command
        command_class (str): name of command class inside command module
        command_data (dict): dict of string data to be passed to command

    Examples:
        >>> jm = JournalMaker()
        >>> cmdata = {'key1':'value1', 'key2':'value2'}
        >>> jm.execute_command(tab_name='Add-Ins',
        ...                    panel_name='Panel Name',
        ...                    command_module='Addon App Namespace',
        ...                    command_class='Command Classname',
        ...                    command_data=cmdata)
    """
    # make sure command_data is not empty
    command_data = {} if command_data is None else command_data

    # make the canonical name for the command
    cmdclassname = '{}.{}'.format(command_module, command_class)
    self._add_entry(templates.EXTERNAL_COMMAND
                    .format(external_command_tab=tab_name,
                            external_command_panel=panel_name,
                            command_class_name=command_class,
                            command_class=cmdclassname))

    # count the data
    data_count = len(command_data.keys())

    # create the entry for the command data
    if data_count > 0:
        data_str_list = []
        for k, v in command_data.items():
            data_str_list.append(' "{}" , "{}"'.format(k, v))

        data_str = '_\n ,'.join(data_str_list)
        self._add_entry(templates.EXTERNAL_COMMANDDATA
                        .format(data_count=data_count,
                                data_string=data_str))
<SYSTEM_TASK:> Execute a dynamo definition. <END_TASK>
<USER_TASK:>
Description:
def execute_dynamo_definition(self, definition_path,
                              show_ui=False, shutdown=True,
                              automation=False, path_exec=True):
    """Execute a dynamo definition.

    Args:
        definition_path (str): full path to dynamo definition file
        show_ui (bool): show dynamo UI at execution
        shutdown (bool): shutdown model after execution
        automation (bool): activate dynamo automation
        path_exec (bool): activate dynamo path execute

    Examples:
        >>> jm = JournalMaker()
        >>> jm.execute_dynamo_definition(
        ...     definition_path='C:/testdef.dyn',
        ...     show_ui=True,
        ...     shutdown=True
        ...     )
    """
    self._add_entry(templates.DYNAMO_COMMAND
                    .format(dynamo_def_path=definition_path,
                            dyn_show_ui=show_ui,
                            dyn_automation=automation,
                            dyn_path_exec=path_exec,
                            dyn_shutdown=shutdown))
<SYSTEM_TASK:> Append an import family entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def import_family(self, rfa_file):
    """Append an import family entry to the journal.

    This instructs Revit to import a family into the opened model.

    Args:
        rfa_file (str): full path of the family file
    """
    self._add_entry(templates.IMPORT_FAMILY
                    .format(family_file=rfa_file))
<SYSTEM_TASK:> Append an export warnings entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def export_warnings(self, export_file):
    """Append an export warnings entry to the journal.

    This instructs Revit to export warnings from the opened model.
    Currently Revit will stop journal execution if the model does not
    have any warnings and the export warnings UI button is disabled.

    Args:
        export_file (str): full path of the output html file
    """
    warn_filepath = op.dirname(export_file)
    warn_filename = op.splitext(op.basename(export_file))[0]
    self._add_entry(templates.EXPORT_WARNINGS
                    .format(warnings_export_path=warn_filepath,
                            warnings_export_file=warn_filename))
<SYSTEM_TASK:> Append a purge model entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def purge_unused(self, pass_count=3):
    """Append a purge model entry to the journal.

    This instructs Revit to purge the open model.

    Args:
        pass_count (int): number of times to execute the purge.
            default is 3
    """
    for _ in range(pass_count):
        self._add_entry(templates.PROJECT_PURGE)
<SYSTEM_TASK:> Append a sync model entry to the journal. <END_TASK>
<USER_TASK:>
Description:
def sync_model(self, comment='', compact_central=False,
               release_borrowed=True, release_workset=True,
               save_local=False):
    """Append a sync model entry to the journal.

    This instructs Revit to sync the currently open workshared model.

    Args:
        comment (str): comment to be provided for the sync step
        compact_central (bool): if True compacts the central file
        release_borrowed (bool): if True releases the borrowed elements
        release_workset (bool): if True releases the borrowed worksets
        save_local (bool): if True saves the local file as well
    """
    self._add_entry(templates.FILE_SYNC_START)

    if compact_central:
        self._add_entry(templates.FILE_SYNC_COMPACT)
    if release_borrowed:
        self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)
    if release_workset:
        self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)
    if save_local:
        self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)

    self._add_entry(templates.FILE_SYNC_COMMENT_OK
                    .format(sync_comment=comment))
<SYSTEM_TASK:> Write the constructed journal in to the provided file. <END_TASK>
<USER_TASK:>
Description:
def write_journal(self, journal_file_path):
    """Write the constructed journal in to the provided file.

    Args:
        journal_file_path (str): full path to output journal file
    """
    # TODO: assert the extension is txt and not other
    with open(journal_file_path, "w") as jrn_file:
        jrn_file.write(self._journal_contents)
<SYSTEM_TASK:> Check whether the provided string exists in the journal file. <END_TASK>
<USER_TASK:>
Description:
def endswith(self, search_str):
    """Check whether the provided string exists in the journal file.

    Only checks the last five lines of the journal file. This method
    is usually used when tracking a journal from an active Revit
    session.

    Args:
        search_str (str): string to search for

    Returns:
        bool: if True the search string is found
    """
    with open(self._jrnl_file, 'r') as jrn_file:
        for entry in reversed(jrn_file.readlines()[-5:]):
            if search_str in entry:
                return True
    return False
<SYSTEM_TASK:> Neighbor prior of strength alpha for the given count matrix. <END_TASK>
<USER_TASK:>
Description:
def prior_neighbor(C, alpha=0.001):
    r"""Neighbor prior of strength alpha for the given count matrix.

    The prior is defined by
        b_ij = alpha if Z_ij + Z_ji > 0
        b_ij = 0     else

    Parameters
    ----------
    C : (M, M) scipy.sparse matrix
        Count matrix
    alpha : float (optional)
        Value of prior counts

    Returns
    -------
    B : (M, M) scipy.sparse matrix
        Prior count matrix
    """
    C_sym = C + C.transpose()
    C_sym = C_sym.tocoo()
    data = C_sym.data
    row = C_sym.row
    col = C_sym.col
    data_B = alpha * np.ones_like(data)
    B = coo_matrix((data_B, (row, col)))
    return B
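A small sketch of the effect: a count in either direction marks a pair of states as neighbors, so the prior is placed symmetrically.

    import numpy as np
    from scipy.sparse import csr_matrix

    C = csr_matrix(np.array([[10, 2, 0],
                             [0, 5, 1],
                             [0, 0, 4]]))
    B = prior_neighbor(C, alpha=0.001)
    # B holds 0.001 at (0,0), (0,1), (1,0), (1,1), (1,2), (2,1) and (2,2);
    # it stays empty at (0,2)/(2,0), where C + C.T has no counts.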
<SYSTEM_TASK:> Constant prior of strength alpha. <END_TASK>
<USER_TASK:>
Description:
def prior_const(C, alpha=0.001):
    """Constant prior of strength alpha.

    The prior is defined via b_ij = alpha for all i, j.

    Parameters
    ----------
    C : (M, M) ndarray or scipy.sparse matrix
        Count matrix
    alpha : float (optional)
        Value of prior counts

    Returns
    -------
    B : (M, M) ndarray
        Prior count matrix
    """
    B = alpha * np.ones(C.shape)
    return B
<SYSTEM_TASK:> Tests whether T is a transition matrix. <END_TASK>
<USER_TASK:>
Description:
def is_transition_matrix(T, tol=1e-10):
    """Tests whether T is a transition matrix.

    Parameters
    ----------
    T : ndarray shape=(n, n)
        matrix to test
    tol : float
        tolerance to check with

    Returns
    -------
    Truth value : bool
        True, if all elements are in interval [0, 1] and each row of
        T sums up to 1. False, otherwise.
    """
    if T.ndim != 2:
        return False
    if T.shape[0] != T.shape[1]:
        return False
    dim = T.shape[0]
    X = np.abs(T) - T      # nonzero wherever T has a negative entry
    x = np.sum(T, axis=1)  # row sums
    return np.abs(x - np.ones(dim)).max() < dim * tol and X.max() < 2.0 * tol
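Two tiny checks of the invariants this enforces (square shape, non-negative entries, rows summing to one):

    import numpy as np

    is_transition_matrix(np.array([[0.9, 0.1],
                                   [0.5, 0.5]]))     # True
    is_transition_matrix(np.array([[1.2, -0.2],
                                   [0.5, 0.5]]))     # False: negative entry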
<SYSTEM_TASK:> Generate a count matrix from a given list of discrete trajectories. <END_TASK>
<USER_TASK:>
Description:
def count_matrix_coo2_mult(dtrajs, lag, sliding=True, sparse=True, nstates=None):
    r"""Generate a count matrix from a given list of discrete trajectories.

    The generated count matrix is a sparse matrix in compressed
    sparse row (CSR) or numpy ndarray format.

    Parameters
    ----------
    dtrajs : list of ndarrays
        discrete trajectories
    lag : int
        Lagtime in trajectory steps
    sliding : bool, optional
        If true the sliding window approach is used for transition counting
    sparse : bool (optional)
        Whether to return a dense or a sparse matrix
    nstates : int, optional
        Enforce a count-matrix with shape=(nstates, nstates). If there
        are more states in the data, this will lead to an exception.

    Returns
    -------
    C : scipy.sparse.csr_matrix or numpy.ndarray
        The count matrix at given lag in scipy compressed sparse row
        or numpy ndarray format.
    """
    # Determine number of states
    if nstates is None:
        from msmtools.dtraj import number_of_states
        nstates = number_of_states(dtrajs)
    rows = []
    cols = []
    # collect transition index pairs
    for dtraj in dtrajs:
        if dtraj.size > lag:
            if sliding:
                rows.append(dtraj[0:-lag])
                cols.append(dtraj[lag:])
            else:
                rows.append(dtraj[0:-lag:lag])
                cols.append(dtraj[lag::lag])
    # is there anything?
    if len(rows) == 0:
        raise ValueError('No counts found - lag ' + str(lag) +
                         ' may exceed all trajectory lengths.')
    # feed into one COO matrix
    row = np.concatenate(rows)
    col = np.concatenate(cols)
    data = np.ones(row.size)
    C = scipy.sparse.coo_matrix((data, (row, col)), shape=(nstates, nstates))
    # export to output format
    if sparse:
        return C.tocsr()
    else:
        return C.toarray()
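For a single trajectory [0, 0, 1, 1] at lag 1 with sliding windows, the counted transitions are (0,0), (0,1) and (1,1):

    import numpy as np

    C = count_matrix_coo2_mult([np.array([0, 0, 1, 1])], lag=1, sparse=False)
    # array([[1., 1.],
    #        [0., 1.]])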
<SYSTEM_TASK:> True if T is a transition matrix. <END_TASK>
<USER_TASK:>
Description:
def is_transition_matrix(T, tol):
    """True if T is a transition matrix.

    Parameters
    ----------
    T : scipy.sparse matrix
        Matrix to check
    tol : float
        tolerance to check with

    Returns
    -------
    Truth value : bool
        True, if T is positive and normed. False, otherwise.
    """
    T = T.tocsr()  # compressed sparse row for fast row slicing
    values = T.data  # non-zero entries of T

    """Check entry-wise positivity"""
    is_positive = np.allclose(values, np.abs(values), rtol=tol)

    """Check row normalization"""
    is_normed = np.allclose(T.sum(axis=1), 1.0, rtol=tol)

    return is_positive and is_normed
<SYSTEM_TASK:> Check connectivity of the transition matrix. <END_TASK>
<USER_TASK:>
Description:
def is_connected(T, directed=True):
    r"""Check connectivity of the transition matrix.

    Return true, if the input matrix is completely connected,
    effectively checking if the number of connected components equals one.

    Parameters
    ----------
    T : scipy.sparse matrix
        Transition matrix
    directed : bool, optional
        Whether to compute connected components for a directed or
        undirected graph. Default is True.

    Returns
    -------
    connected : boolean, returning true only if T is connected.
    """
    nc = connected_components(T, directed=directed, connection='strong',
                              return_labels=False)
    return nc == 1
<SYSTEM_TASK:> Checks if T is 'ergodic'. <END_TASK>
<USER_TASK:>
Description:
def is_ergodic(T, tol):
    """Checks if T is 'ergodic'.

    Parameters
    ----------
    T : scipy.sparse matrix
        Transition matrix
    tol : float
        tolerance

    Returns
    -------
    Truth value : bool
        True, if # strongly connected components = 1. False, otherwise.
    """
    if isdense(T):
        # dense arrays have no .tocsr(); convert explicitly
        # (assumes csr_matrix is imported from scipy.sparse)
        T = csr_matrix(T)
    if not is_transition_matrix(T, tol):
        raise ValueError("given matrix is not a valid transition matrix.")
    num_components = connected_components(T, directed=True,
                                          connection='strong',
                                          return_labels=False)
    return num_components == 1
<SYSTEM_TASK:> Find dynamic bottleneck of flux network. <END_TASK>
<USER_TASK:>
Description:
def find_bottleneck(F, A, B):
    r"""Find dynamic bottleneck of flux network.

    Parameters
    ----------
    F : scipy.sparse matrix
        The flux network
    A : array_like
        The set of starting states
    B : array_like
        The set of end states

    Returns
    -------
    e : tuple of int
        The edge corresponding to the dynamic bottleneck
    """
    if F.nnz == 0:
        raise PathwayError('no more pathways left: Flux matrix does not '
                           'contain any positive entries')
    F = F.tocoo()
    n = F.shape[0]

    """Get edges and corresponding flux values"""
    val = F.data
    row = F.row
    col = F.col

    """Sort edges according to flux"""
    ind = np.argsort(val)
    val = val[ind]
    row = row[ind]
    col = col[ind]

    """Check if edge with largest conductivity connects A and B"""
    b = np.array([row[-1], col[-1]])
    if has_path(b, A, B):
        return b
    else:
        """Bisection of flux-value array"""
        r = val.size
        l = 0
        while r - l > 1:
            m = int(np.floor(0.5 * (r + l)))
            valtmp = val[m:]
            rowtmp = row[m:]
            coltmp = col[m:]
            C = coo_matrix((valtmp, (rowtmp, coltmp)), shape=(n, n))
            """Check if there is a path connecting A and B by iterating
            over all starting nodes in A"""
            if has_connection(C, A, B):
                l = 1 * m
            else:
                r = 1 * m
        E_AB = coo_matrix((val[l + 1:], (row[l + 1:], col[l + 1:])),
                          shape=(n, n))
        b1 = row[l]
        b2 = col[l]
        return b1, b2, E_AB
<SYSTEM_TASK:> Check if the given graph contains a path connecting A and B. <END_TASK>
<USER_TASK:>
Description:
def has_connection(graph, A, B):
    r"""Check if the given graph contains a path connecting A and B.

    Parameters
    ----------
    graph : scipy.sparse matrix
        Adjacency matrix of the graph
    A : array_like
        The set of starting states
    B : array_like
        The set of end states

    Returns
    -------
    hc : bool
        True if the graph contains a path connecting A and B,
        otherwise False.
    """
    for istart in A:
        nodes = csgraph.breadth_first_order(graph, istart, directed=True,
                                            return_predecessors=False)
        if has_path(nodes, A, B):
            return True
    return False
<SYSTEM_TASK:> Test if nodes from a breadth_first_order search lead from A to B. <END_TASK>
<USER_TASK:>
Description:
def has_path(nodes, A, B):
    r"""Test if nodes from a breadth_first_order search lead from A to B.

    Parameters
    ----------
    nodes : array_like
        Nodes from a breadth_first_order search
    A : array_like
        The set of educt states
    B : array_like
        The set of product states

    Returns
    -------
    has_path : boolean
        True if there exists a path, else False
    """
    x1 = np.intersect1d(nodes, A).size > 0
    x2 = np.intersect1d(nodes, B).size > 0
    return x1 and x2
<SYSTEM_TASK:> Compute the dominant reaction-pathway. <END_TASK>
<USER_TASK:>
Description:
def pathway(F, A, B):
    r"""Compute the dominant reaction-pathway.

    Parameters
    ----------
    F : (M, M) scipy.sparse matrix
        The flux network (matrix of netflux values)
    A : array_like
        The set of starting states
    B : array_like
        The set of end states

    Returns
    -------
    w : list
        The dominant reaction-pathway
    """
    if F.nnz == 0:
        raise PathwayError('no more pathways left: Flux matrix does not '
                           'contain any positive entries')
    b1, b2, F = find_bottleneck(F, A, B)
    if np.any(A == b1):
        wL = [b1, ]
    elif np.any(B == b1):
        raise PathwayError("Roles of vertices b1 and b2 are switched. "
                           "This should never happen for a correct flux "
                           "network obtained from a reversible transition "
                           "matrix.")
    else:
        wL = pathway(F, A, [b1, ])
    if np.any(B == b2):
        wR = [b2, ]
    elif np.any(A == b2):
        raise PathwayError("Roles of vertices b1 and b2 are switched. "
                           "This should never happen for a correct flux "
                           "network obtained from a reversible transition "
                           "matrix.")
    else:
        wR = pathway(F, [b2, ], B)
    return wL + wR
<SYSTEM_TASK:> Remove capacity along a path from flux network. <END_TASK>
<USER_TASK:>
Description:
def remove_path(F, path):
    r"""Remove capacity along a path from flux network.

    Parameters
    ----------
    F : (M, M) scipy.sparse matrix
        The flux network (matrix of netflux values)
    path : list
        Reaction path

    Returns
    -------
    F : (M, M) scipy.sparse matrix
        The updated flux network
    """
    c = capacity(F, path)
    F = F.todok()
    L = len(path)
    for l in range(L - 1):
        i = path[l]
        j = path[l + 1]
        F[i, j] -= c
    return F
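Together with pathway() and total_flux(), this supports the usual pathway decomposition loop: repeatedly extract the dominant pathway, record its capacity, and subtract it from the network until the requested fraction of the total A->B flux is recovered. A minimal sketch, assuming the same capacity(F, path) helper used above and leaving out the bookkeeping (iteration caps, PathwayError handling when the network is exhausted) a production version would need:

    def pathway_decomposition(F, A, B, fraction=0.99):
        total = total_flux(F, A)
        paths, capacities = [], []
        recovered = 0.0
        while recovered < fraction * total:
            p = pathway(F, A, B)           # dominant remaining pathway
            c = capacity(F, p)             # its bottleneck flux
            F = remove_path(F, p)          # subtract it from the network
            paths.append(p)
            capacities.append(c)
            recovered += c
        return paths, capacities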
<SYSTEM_TASK:> Adds artificial end states replacing source and sink sets. <END_TASK>
<USER_TASK:>
Description:
def add_endstates(F, A, B):
    r"""Adds artificial end states replacing source and sink sets.

    Parameters
    ----------
    F : (M, M) scipy.sparse matrix
        The flux network (matrix of netflux values)
    A : array_like
        The set of starting states
    B : array_like
        The set of end states

    Returns
    -------
    F_new : (M+2, M+2) scipy.sparse matrix
        The artificial flux network with extra end states
    a_new : int
        The new single source a_new = M
    b_new : int
        The new single sink b_new = M+1
    """
    """Outgoing currents from A"""
    F = F.tocsr()
    outA = (F[A, :].sum(axis=1)).getA()[:, 0]

    """Incoming currents into B"""
    F = F.tocsc()
    inB = (F[:, B].sum(axis=0)).getA()[0, :]

    F = F.tocoo()
    M = F.shape[0]

    data_old = F.data
    row_old = F.row
    col_old = F.col

    """Add currents from new A=[n,] to all states in A"""
    row1 = np.zeros(outA.shape[0], dtype=int)
    row1[:] = M
    col1 = np.array(A)
    data1 = outA

    """Add currents from old B to new B=[n+1,]"""
    row2 = np.array(B)
    col2 = np.zeros(inB.shape[0], dtype=int)
    col2[:] = M + 1
    data2 = inB

    """Stack data, row and col arrays"""
    data = np.hstack((data_old, data1, data2))
    row = np.hstack((row_old, row1, row2))
    col = np.hstack((col_old, col1, col2))

    """New netflux matrix"""
    F_new = coo_matrix((data, (row, col)), shape=(M + 2, M + 2))

    return F_new, M, M + 1
<SYSTEM_TASK:> Check whether a string is utf-8 encoded. <END_TASK>
<USER_TASK:>
Description:
def _is_utf_8(txt):
    """Check whether a string is utf-8 encoded.

    :param bytes txt: utf-8 string
    :return: Whether the string is utf-8 encoded or not
    :rtype: bool
    """
    assert isinstance(txt, six.binary_type)

    try:
        _ = six.text_type(txt, 'utf-8')
    except (TypeError, UnicodeDecodeError):
        # decoding invalid bytes raises UnicodeDecodeError
        return False
    else:
        return True
<SYSTEM_TASK:> Load script files into the context. <END_TASK>
<USER_TASK:>
Description:
def load_libs(self, scripts_paths):
    """Load script files into the context.
    This can be thought of as the HTML script tag.
    The files content must be utf-8 encoded.

    This is a shortcut for reading the files
    and passing the content to :py:func:`run_script`.

    :param list scripts_paths: Script file paths.
    :raises OSError: If there was an error
        manipulating the files. This should not
        normally be caught.
    :raises V8Error: if there was
        an error running the JS script
    """
    for path in scripts_paths:
        self.run_script(_read_file(path), identifier=path)
<SYSTEM_TASK:> Run a JS script within the context. <END_TASK>
<USER_TASK:>
Description:
def run_script(self, script, identifier=_DEFAULT_SCRIPT_NAME):
    """Run a JS script within the context.
    All code is run synchronously; there is no event loop.
    It's thread-safe.

    :param script: utf-8 encoded or unicode string
    :type script: bytes or str
    :param identifier: utf-8 encoded or unicode string.
        This is used as the name of the script
        (ie: in stack-traces)
    :type identifier: bytes or str
    :return: Result of running the JS script
    :rtype: str
    :raises V8Error: if there was
        an error running the JS script
    """
    assert isinstance(script, six.text_type) or _is_utf_8(script)
    assert isinstance(identifier, six.text_type) or _is_utf_8(identifier)

    if isinstance(script, six.text_type):
        script = script.encode('utf-8')

    if isinstance(identifier, six.text_type):
        identifier = identifier.encode('utf-8')

    with _String() as output:
        with _String() as error:
            code = lib.v8cffi_run_script(
                self._c_context[0],
                script,
                len(script),
                identifier,
                len(identifier),
                output.string_ptr,
                output.len_ptr,
                error.string_ptr,
                error.len_ptr)

            if code != lib.E_V8_OK:
                raise exceptions.get_exception(code)(six.text_type(error))

            return six.text_type(output)
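A hedged usage sketch (how a context object is obtained depends on the library's platform/VM setup, so take ctx as given):

    result = ctx.run_script(u'var xs = [1, 2, 3]; xs.length * 7;')
    # result == u'21'
    ctx.run_script(b'xs.length;', identifier=b'check.js')  # bytes work too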
<SYSTEM_TASK:> Compute eigenvalues of given transition matrix. <END_TASK>
<USER_TASK:>
Description:
def eigenvalues(T, k=None, reversible=False, mu=None):
    r"""Compute eigenvalues of given transition matrix.

    Parameters
    ----------
    T : (d, d) ndarray
        Transition matrix (stochastic matrix)
    k : int or tuple of ints, optional
        Compute the first k eigenvalues of T
    reversible : bool, optional
        Indicate that transition matrix is reversible
    mu : (d,) ndarray, optional
        Stationary distribution of T

    Returns
    -------
    eig : (n,) ndarray
        The eigenvalues of T ordered with decreasing absolute value.
        If k is None then n=d, if k is int then n=k, otherwise n is
        the length of the given tuple of eigenvalue indices.

    Notes
    -----
    Eigenvalues are computed using the numpy.linalg interface for the
    corresponding LAPACK routines.

    If reversible=True the eigenvalues of the similar symmetric matrix
    `\sqrt(\mu_i / \mu_j) p_{ij}` will be computed.

    The precomputed stationary distribution will only be used if
    reversible=True.
    """
    if reversible:
        try:
            evals = eigenvalues_rev(T, k=k, mu=mu)
        except:
            evals = eigvals(T).real  # use fallback code but cast to real
    else:
        evals = eigvals(T)  # nonreversible

    """Sort by decreasing absolute value"""
    ind = np.argsort(np.abs(evals))[::-1]
    evals = evals[ind]

    if isinstance(k, (list, set, tuple)):
        try:
            return [evals[n] for n in k]
        except IndexError:
            raise ValueError("given indices do not exist: ", k)
    elif k is not None:
        return evals[:k]
    else:
        return evals
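The reversible branch relies on the similarity transform mentioned in the Notes: for a reversible T, the matrix S with S_ij = sqrt(mu_i / mu_j) * p_ij is symmetric, so a symmetric eigensolver applies and the eigenvalues come out real. A quick numerical check of that identity:

    import numpy as np

    T = np.array([[0.9, 0.1],
                  [0.2, 0.8]])
    mu = np.array([2.0, 1.0]) / 3.0  # stationary and reversible: mu_i T_ij == mu_j T_ji
    smu = np.sqrt(mu)
    S = smu[:, None] * T / smu
    assert np.allclose(S, S.T)
    assert np.allclose(np.sort(np.linalg.eigvalsh(S)),
                       np.sort(np.linalg.eigvals(T).real))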
<SYSTEM_TASK:> r"""Compute eigenvalues of reversible transition matrix. <END_TASK> <USER_TASK:> Description: def eigenvalues_rev(T, k=None, mu=None): r"""Compute eigenvalues of reversible transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T mu : (d,) ndarray, optional Stationary distribution of T Returns ------- eig : (n,) ndarray, The eigenvalues of T ordered with decreasing absolute value. If k is None then n=d, if k is int then n=k otherwise n is the length of the given tuple of eigenvalue indices. Raises ------ ValueError If stationary distribution is nonpositive. """
"""compute stationary distribution if not given""" if mu is None: mu = stationary_distribution(T) if np.any(mu <= 0): raise ValueError('Cannot symmetrize transition matrix') """ symmetrize T """ smu = np.sqrt(mu) S = smu[:,None] * T / smu """ symmetric eigenvalue problem """ evals = eigvalsh(S) return evals
<SYSTEM_TASK:> r"""Decomposition into left and right eigenvectors. <END_TASK> <USER_TASK:> Description: def rdl_decomposition_nrev(T, norm='standard'): r"""Decomposition into left and right eigenvectors. Parameters ---------- T : (M, M) ndarray Transition matrix norm: {'standard', 'reversible'} standard: (L'R) = Id, L[:,0] is a probability distribution, the stationary distribution mu of T. Right eigenvectors R have a 2-norm of 1 reversible: R and L are related via L=L[:,0]*R Returns ------- R : (M, M) ndarray The normalized (with respect to L) right eigenvectors, such that the column R[:,i] is the right eigenvector corresponding to the eigenvalue w[i], dot(T,R[:,i])=w[i]*R[:,i] D : (M, M) ndarray A diagonal matrix containing the eigenvalues, each repeated according to its multiplicity L : (M, M) ndarray The normalized (with respect to `R`) left eigenvectors, such that the row ``L[i, :]`` is the left eigenvector corresponding to the eigenvalue ``w[i]``, ``dot(L[i, :], T)``=``w[i]*L[i, :]`` """
d = T.shape[0] w, R = eig(T) """Sort by decreasing magnitude of eigenvalue""" ind = np.argsort(np.abs(w))[::-1] w = w[ind] R = R[:, ind] """Diagonal matrix containing eigenvalues""" D = np.diag(w) # Standard norm: Euclidean norm is 1 for r and LR = I. if norm == 'standard': L = solve(np.transpose(R), np.eye(d)) """l1- normalization of L[:, 0]""" R[:, 0] = R[:, 0] * np.sum(L[:, 0]) L[:, 0] = L[:, 0] / np.sum(L[:, 0]) return R, D, np.transpose(L) # Reversible norm: elif norm == 'reversible': b = np.zeros(d) b[0] = 1.0 A = np.transpose(R) nu = solve(A, b) mu = nu / np.sum(nu) """Ensure that R[:,0] is positive""" R[:, 0] = R[:, 0] / np.sign(R[0, 0]) """Use mu to connect L and R""" L = mu[:, np.newaxis] * R """Compute overlap""" s = np.diag(np.dot(np.transpose(L), R)) """Renormalize left-and right eigenvectors to ensure L'R=Id""" R = R / np.sqrt(s[np.newaxis, :]) L = L / np.sqrt(s[np.newaxis, :]) return R, D, np.transpose(L) else: raise ValueError("Keyword 'norm' has to be either 'standard' or 'reversible'")
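A sanity-check sketch (assuming `rdl_decomposition_nrev` is in scope; same toy matrix as above) of the properties promised by norm='standard':

import numpy as np

T = np.array([[0.9, 0.1, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 0.1, 0.9]])

R, D, L = rdl_decomposition_nrev(T, norm='standard')
assert np.allclose(np.dot(L, R), np.eye(3))     # biorthonormality: L R = Id
assert np.allclose(np.dot(R, np.dot(D, L)), T)  # T = R D L
assert np.isclose(np.sum(L[0]), 1.0)            # L[0] is the stationary distribution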
<SYSTEM_TASK:> r"""Decomposition into left and right eigenvectors for reversible <END_TASK> <USER_TASK:> Description: def rdl_decomposition_rev(T, norm='reversible', mu=None): r"""Decomposition into left and right eigenvectors for reversible transition matrices. Parameters ---------- T : (M, M) ndarray Transition matrix norm: {'standard', 'reversible'} standard: (L'R) = Id, L[:,0] is a probability distribution, the stationary distribution mu of T. Right eigenvectors R have a 2-norm of 1. reversible: R and L are related via L=L[:,0]*R. mu : (M,) ndarray, optional Stationary distribution of T Returns ------- R : (M, M) ndarray The normalized (with respect to L) right eigenvectors, such that the column R[:,i] is the right eigenvector corresponding to the eigenvalue w[i], dot(T,R[:,i])=w[i]*R[:,i] D : (M, M) ndarray A diagonal matrix containing the eigenvalues, each repeated according to its multiplicity L : (M, M) ndarray The normalized (with respect to `R`) left eigenvectors, such that the row ``L[i, :]`` is the left eigenvector corresponding to the eigenvalue ``w[i]``, ``dot(L[i, :], T)``=``w[i]*L[i, :]`` Notes ----- The eigenvalues and eigenvectors of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be used to compute the eigenvalues and eigenvectors of T. The stationay distribution will be computed if no precomputed stationary distribution is given. """
if mu is None: mu = stationary_distribution(T) """ symmetrize T """ smu = np.sqrt(mu) S = smu[:,None] * T / smu val, eigvec = eigh(S) """Sort eigenvalues and eigenvectors""" perm = np.argsort(np.abs(val))[::-1] val = val[perm] eigvec = eigvec[:, perm] """Diagonal matrix of eigenvalues""" D = np.diag(val) """Right and left eigenvectors""" R = eigvec / smu[:, np.newaxis] L = eigvec * smu[:, np.newaxis] """Ensure that R[:,0] is positive and unity""" tmp = R[0, 0] R[:, 0] = R[:, 0] / tmp """Ensure that L[:, 0] is probability vector""" L[:, 0] = L[:, 0] * tmp if norm == 'reversible': return R, D, L.T elif norm == 'standard': """Standard l2-norm of right eigenvectors""" w = np.diag(np.dot(R.T, R)) sw = np.sqrt(w) """Don't change normalization of eigenvectors for dominant eigenvalue""" sw[0] = 1.0 R = R / sw[np.newaxis, :] L = L * sw[np.newaxis, :] return R, D, L.T else: raise ValueError("Keyword 'norm' has to be either 'standard' or 'reversible'")
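A matching sketch (again assuming the function is in scope) of the norm='reversible' relations, where each row of L is mu times the corresponding column of R and the first right eigenvector is constant:

import numpy as np

T = np.array([[0.9, 0.1, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 0.1, 0.9]])
mu = np.array([5.0, 1.0, 5.0]) / 11.0

R, D, L = rdl_decomposition_rev(T, norm='reversible', mu=mu)
assert np.allclose(np.dot(R, np.dot(D, L)), T)  # T = R D L
assert np.allclose(L, (mu[:, None] * R).T)      # L[i, :] = mu * R[:, i]
assert np.allclose(R[:, 0], 1.0)                # constant first right eigenvector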
<SYSTEM_TASK:> r"""Compute implied time scales from given eigenvalues <END_TASK> <USER_TASK:> Description: def timescales_from_eigenvalues(evals, tau=1): r"""Compute implied time scales from given eigenvalues Parameters ---------- evals : eigenvalues tau : lag time Returns ------- ts : ndarray The implied time scales to the given eigenvalues, in the same order. """
"""Check for dominant eigenvalues with large imaginary part""" if not np.allclose(evals.imag, 0.0): warnings.warn('Using eigenvalues with non-zero imaginary part', ImaginaryEigenValueWarning) """Check for multiple eigenvalues of magnitude one""" ind_abs_one = np.isclose(np.abs(evals), 1.0, rtol=0.0, atol=1e-14) if sum(ind_abs_one) > 1: warnings.warn('Multiple eigenvalues with magnitude one.', SpectralWarning) """Compute implied time scales""" ts = np.zeros(len(evals)) """Eigenvalues of magnitude one imply infinite timescale""" ts[ind_abs_one] = np.inf """All other eigenvalues give rise to finite timescales""" ts[np.logical_not(ind_abs_one)] = \ -1.0 * tau / np.log(np.abs(evals[np.logical_not(ind_abs_one)])) return ts
<SYSTEM_TASK:> Determine if the given filename indicates a dense or a sparse matrix <END_TASK> <USER_TASK:> Description: def is_sparse_file(filename): """Determine if the given filename indicates a dense or a sparse matrix If the pathname is of the form xxx.coo.yyy, return True; otherwise return False. """
_, basename = os.path.split(filename) name, _ = os.path.splitext(basename) _, matrix_ext = os.path.splitext(name) return matrix_ext == '.coo'
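A quick sketch of the double-extension convention in use (file names are hypothetical):

print(is_sparse_file('data/T.coo.dat'))  # True  -> sparse COO storage
print(is_sparse_file('data/T.dat'))      # False -> dense storage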
<SYSTEM_TASK:> r"""Fast computation of the stationary vector using backward <END_TASK> <USER_TASK:> Description: def stationary_distribution_from_backward_iteration(P, eps=1e-15): r"""Fast computation of the stationary vector using backward iteration. Parameters ---------- P : (M, M) scipy.sparse matrix Transition matrix eps : float (optional) Perturbation parameter for the true eigenvalue. Returns ------- pi : (M,) ndarray Stationary vector """
A = P.transpose() mu = 1.0 - eps x0 = np.ones(P.shape[0]) y = backward_iteration(A, mu, x0) pi = y / y.sum() return pi
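`backward_iteration` itself is not shown here; a minimal dense sketch of the inverse-iteration step it is assumed to perform (an illustration only, not the library's sparse implementation):

import numpy as np

def backward_iteration_dense(A, mu, x0, tol=1e-14, maxiter=100):
    """Inverse iteration: repeatedly solve (A - mu*I) x_new = x and
    normalize, converging to the eigenvector of the eigenvalue closest to mu."""
    x = x0 / np.linalg.norm(x0)
    M = A - mu * np.eye(A.shape[0])
    for _ in range(maxiter):
        x_new = np.linalg.solve(M, x)
        x_new /= np.linalg.norm(x_new)
        if np.linalg.norm(np.abs(x_new) - np.abs(x)) < tol:
            return x_new
        x = x_new
    return x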
<SYSTEM_TASK:> r"""Compute the eigenvalues of a sparse transition matrix. <END_TASK> <USER_TASK:> Description: def eigenvalues(T, k=None, ncv=None, reversible=False, mu=None): r"""Compute the eigenvalues of a sparse transition matrix. Parameters ---------- T : (M, M) scipy.sparse matrix Transition matrix k : int, optional Number of eigenvalues to compute. ncv : int, optional The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- v : (k,) ndarray Eigenvalues of T Notes ----- The first k eigenvalues of largest magnitude are computed. If reversible=True the the eigenvalues of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be computed. The precomputed stationary distribution will only be used if reversible=True. """
if k is None: raise ValueError("Number of eigenvalues required for decomposition of sparse matrix") else: if reversible: try: v = eigenvalues_rev(T, k, ncv=ncv, mu=mu) except Exception: # use fallback code, but cast to real v = scipy.sparse.linalg.eigs(T, k=k, which='LM', return_eigenvectors=False, ncv=ncv).real else: v = scipy.sparse.linalg.eigs(T, k=k, which='LM', return_eigenvectors=False, ncv=ncv) ind = np.argsort(np.abs(v))[::-1] return v[ind]
<SYSTEM_TASK:> r"""Compute the eigenvalues of a reversible, sparse transition matrix. <END_TASK> <USER_TASK:> Description: def eigenvalues_rev(T, k, ncv=None, mu=None): r"""Compute the eigenvalues of a reversible, sparse transition matrix. Parameters ---------- T : (M, M) scipy.sparse matrix Transition matrix k : int Number of eigenvalues to compute. ncv : int, optional The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k mu : (M,) ndarray, optional Stationary distribution of T Returns ------- v : (k,) ndarray Eigenvalues of T Raises ------ ValueError If stationary distribution is nonpositive. Notes ----- The first k eigenvalues of largest magnitude are computed. """
"""compute stationary distribution if not given""" if mu is None: mu = stationary_distribution(T) if np.any(mu <= 0): raise ValueError('Cannot symmetrize transition matrix') """ symmetrize T """ smu = np.sqrt(mu) D = diags(smu, 0) Dinv = diags(1.0/smu, 0) S = (D.dot(T)).dot(Dinv) """Compute eigenvalues using a solver for symmetric/hermititan eigenproblems""" evals = scipy.sparse.linalg.eigsh(S, k=k, ncv=ncv, which='LM', return_eigenvectors=False) return evals
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def number_of_states(dtrajs): r""" Determine the number of states from a set of discrete trajectories Parameters ---------- dtrajs : list of int-arrays discrete trajectories """
# determine number of states n nmax = 0 for dtraj in dtrajs: nmax = max(nmax, np.max(dtraj)) # return number of states return nmax + 1
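A quick usage sketch (assuming the function is in scope):

import numpy as np

dtrajs = [np.array([0, 1, 2, 1]), np.array([0, 4, 4])]
print(number_of_states(dtrajs))  # 5, since the largest state label seen is 4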
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def determine_lengths(dtrajs): r""" Determines the lengths of all trajectories Parameters ---------- dtrajs : list of int-arrays discrete trajectories """
if isinstance(dtrajs[0], int): return len(dtrajs) * np.ones(1) lengths = np.zeros(len(dtrajs)) for i in range(len(dtrajs)): lengths[i] = len(dtrajs[i]) return lengths
<SYSTEM_TASK:> Samples n counts at the given lagtime from the given trajectory <END_TASK> <USER_TASK:> Description: def bootstrap_counts_singletraj(dtraj, lagtime, n): """ Samples n counts at the given lagtime from the given trajectory """
# check if length is sufficient L = len(dtraj) if (lagtime > L): raise ValueError( 'Cannot sample counts with lagtime ' + str(lagtime) + ' from a trajectory with length ' + str(L)) # sample I = np.random.randint(0, L - lagtime - 1, size=n) J = I + lagtime # return state pairs return (dtraj[I], dtraj[J])
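A usage sketch (the trajectory is a toy example; the seed is only for reproducibility):

import numpy as np

np.random.seed(42)
dtraj = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 0])
s_t, s_t_tau = bootstrap_counts_singletraj(dtraj, lagtime=2, n=5)
print(s_t)      # five states sampled at random times t
print(s_t_tau)  # the states observed lagtime steps later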
<SYSTEM_TASK:> r"""Compute connected components for a directed graph with weights <END_TASK> <USER_TASK:> Description: def connected_sets(C, directed=True): r"""Compute connected components for a directed graph with weights represented by the given count matrix. Parameters ---------- C : scipy.sparse matrix or numpy ndarray square matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True. Returns ------- cc : list of arrays of integers Each entry is an array containing all vertices (states) in the corresponding connected component. """
M = C.shape[0] """ Compute connected components of C. nc is the number of components, indices contain the component labels of the states """ nc, indices = csgraph.connected_components(C, directed=directed, connection='strong') states = np.arange(M) # Discrete states """Order indices""" ind = np.argsort(indices) indices = indices[ind] """Order states""" states = states[ind] """ The state index tuple is now of the following form (states, indices)=([s_23, s_17,...,s_3, s_2, ...], [0, 0, ..., 1, 1, ...]) """ """Find number of states per component""" count = np.bincount(indices) """Cumulative sum of count gives start and end indices of components""" csum = np.zeros(len(count) + 1, dtype=int) csum[1:] = np.cumsum(count) """Generate list containing components, sort each component by increasing state label""" cc = [] for i in range(nc): cc.append(np.sort(states[csum[i]:csum[i + 1]])) """Sort by size of component - largest component first""" cc = sorted(cc, key=lambda x: -len(x)) return cc
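A small sketch (toy count matrix; assuming the function is in scope). States 0 and 1 communicate in both directions, while state 2 can be entered but not left, so it forms its own strongly connected component:

import numpy as np

C = np.array([[5, 2, 0],
              [2, 5, 1],
              [0, 0, 3]])
print(connected_sets(C))                  # [array([0, 1]), array([2])]
print(connected_sets(C, directed=False))  # [array([0, 1, 2])]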
<SYSTEM_TASK:> r"""Compute the count matrix of the largest connected set. <END_TASK> <USER_TASK:> Description: def largest_connected_submatrix(C, directed=True, lcc=None): r"""Compute the count matrix of the largest connected set. The input count matrix is used as a weight matrix for the construction of a directed graph. The largest connected set of the constructed graph is computed. Vertices belonging to the largest connected component are used to generate a completely connected subgraph. The weight matrix of the subgraph is the desired completely connected count matrix. Parameters ---------- C : scipy.sparse matrix or numpy ndarray Count matrix specifying edge weights directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True lcc : (M,) ndarray, optional The largest connected set Returns ------- C_cc : scipy.sparse matrix Count matrix of largest completely connected set of vertices (states) """
if lcc is None: lcc = largest_connected_set(C, directed=directed) """Row slicing""" if scipy.sparse.issparse(C): C_cc = C.tocsr() else: C_cc = C C_cc = C_cc[lcc, :] """Column slicing""" if scipy.sparse.issparse(C): C_cc = C_cc.tocsc() C_cc = C_cc[:, lcc] if scipy.sparse.issparse(C): return C_cc.tocoo() else: return C_cc
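Continuing the toy example above (and assuming the module's `largest_connected_set` is available for the default lcc=None), restricting to the largest strongly connected set keeps only the communicating block:

import numpy as np

C = np.array([[5, 2, 0],
              [2, 5, 1],
              [0, 0, 3]])
C_cc = largest_connected_submatrix(C)  # largest connected set is {0, 1}
print(C_cc)
# [[5 2]
#  [2 5]]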
<SYSTEM_TASK:> r"""Return true, if the input count matrix is completely connected. <END_TASK> <USER_TASK:> Description: def is_connected(C, directed=True): r"""Return true, if the input count matrix is completely connected. Effectively checking if the number of connected components equals one. Parameters ---------- C : scipy.sparse matrix or numpy ndarray Count matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True. Returns ------- connected : boolean, returning true only if C is connected. """
nc = csgraph.connected_components(C, directed=directed, connection='strong', return_labels=False) return nc == 1
<SYSTEM_TASK:> r"""Coarse-grains the flux to the given sets. <END_TASK> <USER_TASK:> Description: def coarsegrain(F, sets): r"""Coarse-grains the flux to the given sets. Parameters ---------- F : (n, n) ndarray or scipy.sparse matrix Matrix of flux values between pairs of states. sets : list of array-like of ints The sets of states onto which the flux is coarse-grained. Notes ----- The coarse grained flux is defined as .. math:: fc_{I,J} = \sum_{i \in I,j \in J} f_{i,j} Note that if you coarse-grain a net flux, it does n ot necessarily have a net flux property anymore. If want to make sure you get a netflux, use to_netflux(coarsegrain(F,sets)). References ---------- .. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and T. Weikl: Constructing the Full Ensemble of Folding Pathways from Short Off-Equilibrium Simulations. Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009) """
if issparse(F): return sparse.tpt.coarsegrain(F, sets) elif isdense(F): return dense.tpt.coarsegrain(F, sets) else: raise _type_not_supported
<SYSTEM_TASK:> r"""Compute the total flux, or turnover flux, that is produced by <END_TASK> <USER_TASK:> Description: def total_flux(F, A=None): r"""Compute the total flux, or turnover flux, that is produced by the flux sources and consumed by the flux sinks. Parameters ---------- F : (M, M) ndarray Matrix of flux values between pairs of states. A : array_like (optional) List of integer state labels for set A (reactant) Returns ------- F : float The total flux, or turnover flux, that is produced by the flux sources and consumed by the flux sinks References ---------- .. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model Simul 7: 1192-1219 (2009) """
if issparse(F): return sparse.tpt.total_flux(F, A=A) elif isdense(F): return dense.tpt.total_flux(F, A=A) else: raise _type_not_supported
<SYSTEM_TASK:> r"""Mean first passage time for reaction A to B. <END_TASK> <USER_TASK:> Description: def mfpt(totflux, pi, qminus): r"""Mean first passage time for reaction A to B. Parameters ---------- totflux : float The total flux between reactant and product pi : (M,) ndarray Stationary distribution qminus : (M,) ndarray Backward comittor Returns ------- tAB : float The mean first-passage time for the A to B reaction See also -------- rate Notes ----- Equal to the inverse rate, see [1]. References ---------- .. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and T. Weikl: Constructing the Full Ensemble of Folding Pathways from Short Off-Equilibrium Simulations. Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009) """
return dense.tpt.mfpt(totflux, pi, qminus)
<SYSTEM_TASK:> PCCA+ spectral clustering method using the inner simplex algorithm. <END_TASK> <USER_TASK:> Description: def _pcca_connected_isa(evec, n_clusters): """ PCCA+ spectral clustering method using the inner simplex algorithm. Clusters the first n_clusters eigenvectors of a transition matrix in order to cluster the states. This function assumes that the state space is fully connected, i.e. the transition matrix whose eigenvectors are used is supposed to have only one eigenvalue 1, and the corresponding first eigenvector (evec[:,0]) must be constant. Parameters ---------- evec : ndarray A matrix with the sorted eigenvectors in the columns. The stationary eigenvector should be first, then the one to the slowest relaxation process, etc. n_clusters : int Number of clusters to group to. Returns ------- (chi, rot_mat) chi : ndarray (n x m) A matrix containing the probability or membership of each state to be assigned to each cluster. The rows sum to 1. rot_mat : ndarray (m x m) A rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.: chi = np.dot(evec, rot_matrix) References ---------- [1] P. Deuflhard and M. Weber, Robust Perron cluster analysis in conformation dynamics. in: Linear Algebra Appl. 398C M. Dellnitz and S. Kirkland and M. Neumann and C. Schuette (Editors) Elsevier, New York, 2005, pp. 161-184 """
(n, m) = evec.shape # do we have enough eigenvectors? if n_clusters > m: raise ValueError("Cannot cluster the (" + str(n) + " x " + str(m) + " eigenvector matrix to " + str(n_clusters) + " clusters.") # check if the first, and only the first eigenvector is constant diffs = np.abs(np.max(evec, axis=0) - np.min(evec, axis=0)) assert diffs[0] < 1e-6, "First eigenvector is not constant. This indicates that the transition matrix " \ "is not connected or the eigenvectors are incorrectly sorted. Cannot do PCCA." assert diffs[1] > 1e-6, "An eigenvector after the first one is constant. " \ "Probably the eigenvectors are incorrectly sorted. Cannot do PCCA." # local copy of the eigenvectors c = evec[:, list(range(n_clusters))] ortho_sys = np.copy(c) max_dist = 0.0 # representative states ind = np.zeros(n_clusters, dtype=np.int32) # select the first representative as the most outlying point for (i, row) in enumerate(c): if np.linalg.norm(row, 2) > max_dist: max_dist = np.linalg.norm(row, 2) ind[0] = i # translate coordinates to make the first representative the origin ortho_sys -= c[ind[0], None] # select the other m-1 representatives using a Gram-Schmidt orthogonalization for k in range(1, n_clusters): max_dist = 0.0 temp = np.copy(ortho_sys[ind[k - 1]]) # select next farthest point that is not yet a representative for (i, row) in enumerate(ortho_sys): row -= np.dot(np.dot(temp, np.transpose(row)), temp) distt = np.linalg.norm(row, 2) if distt > max_dist and i not in ind[0:k]: max_dist = distt ind[k] = i ortho_sys /= np.linalg.norm(ortho_sys[ind[k]], 2) # print "Final selection ", ind # obtain transformation matrix of eigenvectors to membership matrix rot_mat = np.linalg.inv(c[ind]) #print "Rotation matrix \n ", rot_mat # compute membership matrix chi = np.dot(c, rot_mat) #print "chi \n ", chi return (chi, rot_mat)
<SYSTEM_TASK:> Optimizes the PCCA+ rotation matrix such that the memberships are exclusively nonnegative. <END_TASK> <USER_TASK:> Description: def _opt_soft(eigvectors, rot_matrix, n_clusters): """ Optimizes the PCCA+ rotation matrix such that the memberships are exclusively nonnegative. Parameters ---------- eigvectors : ndarray A matrix with the sorted eigenvectors in the columns. The stationary eigenvector should be first, then the one to the slowest relaxation process, etc. rot_matrix : ndarray (m x m) Nonoptimized rotation matrix n_clusters : int Number of clusters to group to. Returns ------- rot_matrix : ndarray (m x m) Optimized rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.: chi = np.dot(eigvectors, rot_matrix) References ---------- [1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+: application to Markov state models and data classification. Adv Data Anal Classif 7, 147-179 (2013). """
# only consider first n_clusters eigenvectors eigvectors = eigvectors[:, :n_clusters] # crop first row and first column from rot_matrix # rot_crop_matrix = rot_matrix[1:,1:] rot_crop_matrix = rot_matrix[1:][:, 1:] (x, y) = rot_crop_matrix.shape # reshape rot_crop_matrix into linear vector rot_crop_vec = np.reshape(rot_crop_matrix, x * y) # Susanna Roeblitz' target function for optimization def susanna_func(rot_crop_vec, eigvectors): # reshape into matrix rot_crop_matrix = np.reshape(rot_crop_vec, (x, y)) # fill matrix rot_matrix = _fill_matrix(rot_crop_matrix, eigvectors) result = 0 for i in range(0, n_clusters): for j in range(0, n_clusters): result += np.power(rot_matrix[j, i], 2) / rot_matrix[0, i] return -result from scipy.optimize import fmin rot_crop_vec_opt = fmin(susanna_func, rot_crop_vec, args=(eigvectors,), disp=False) rot_crop_matrix = np.reshape(rot_crop_vec_opt, (x, y)) rot_matrix = _fill_matrix(rot_crop_matrix, eigvectors) return rot_matrix
<SYSTEM_TASK:> Helper function for opt_soft <END_TASK> <USER_TASK:> Description: def _fill_matrix(rot_crop_matrix, eigvectors): """ Helper function for opt_soft """
(x, y) = rot_crop_matrix.shape row_sums = np.sum(rot_crop_matrix, axis=1) row_sums = np.reshape(row_sums, (x, 1)) # add -row_sums as leftmost column to rot_crop_matrix rot_crop_matrix = np.concatenate((-row_sums, rot_crop_matrix), axis=1) tmp = -np.dot(eigvectors[:, 1:], rot_crop_matrix) tmp_col_max = np.max(tmp, axis=0) tmp_col_max = np.reshape(tmp_col_max, (1, y + 1)) tmp_col_max_sum = np.sum(tmp_col_max) # add col_max as top row to rot_crop_matrix and normalize rot_matrix = np.concatenate((tmp_col_max, rot_crop_matrix), axis=0) rot_matrix /= tmp_col_max_sum return rot_matrix
<SYSTEM_TASK:> Coarse-grains transition matrix P to n sets using PCCA <END_TASK> <USER_TASK:> Description: def coarsegrain(P, n): """ Coarse-grains transition matrix P to n sets using PCCA Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using: .. math:: \tilde{P} = (M^T M)^{-1} M^T P M See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_. References ---------- [1] S. Kube and M. Weber A coarse graining method for the identification of transition rates between molecular conformations. J. Chem. Phys. 126, 024103 (2007) [2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner: Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules J. Chem. Phys. 139, 184114 (2013) """
M = pcca(P, n) # coarse-grained transition matrix W = np.linalg.inv(np.dot(M.T, M)) A = np.dot(np.dot(M.T, P), M) P_coarse = np.dot(W, A) # symmetrize and renormalize to eliminate numerical errors from msmtools.analysis import stationary_distribution pi_coarse = np.dot(M.T, stationary_distribution(P)) X = np.dot(np.diag(pi_coarse), P_coarse) P_coarse = X / X.sum(axis=1)[:, None] return P_coarse
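To see the projection at work without running PCCA itself, here is a sketch that coarse-grains a 4-state chain with a hand-built crisp membership matrix M (in the function above, M comes from pcca(P, n) and is generally fuzzy):

import numpy as np

P = np.array([[0.9, 0.1, 0.0, 0.0],
              [0.1, 0.89, 0.01, 0.0],
              [0.0, 0.01, 0.89, 0.1],
              [0.0, 0.0, 0.1, 0.9]])

# crisp memberships: states {0, 1} -> set 0, states {2, 3} -> set 1
M = np.array([[1.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0],
              [0.0, 1.0]])

P_c = np.linalg.inv(M.T.dot(M)).dot(M.T.dot(P).dot(M))
print(P_c)  # [[0.995 0.005] [0.005 0.995]] -- rows sum to one for crisp M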
<SYSTEM_TASK:> r"""Check if the given matrix is a transition matrix. <END_TASK> <USER_TASK:> Description: def is_transition_matrix(T, tol=1e-12): r"""Check if the given matrix is a transition matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Matrix to check tol : float (optional) Floating point tolerance to check with Returns ------- is_transition_matrix : bool True, if T is a valid transition matrix, False otherwise Notes ----- A valid transition matrix :math:`P=(p_{ij})` has non-negative elements, :math:`p_{ij} \geq 0`, and elements of each row sum up to one, :math:`\sum_j p_{ij} = 1`. Matrices wit this property are also called stochastic matrices. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_transition_matrix >>> A = np.array([[0.4, 0.5, 0.3], [0.2, 0.4, 0.4], [-1, 1, 1]]) >>> is_transition_matrix(A) False >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_transition_matrix(T) True """
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') if _issparse(T): return sparse.assessment.is_transition_matrix(T, tol) else: return dense.assessment.is_transition_matrix(T, tol)
<SYSTEM_TASK:> r"""Check if the given matrix is a rate matrix. <END_TASK> <USER_TASK:> Description: def is_rate_matrix(K, tol=1e-12): r"""Check if the given matrix is a rate matrix. Parameters ---------- K : (M, M) ndarray or scipy.sparse matrix Matrix to check tol : float (optional) Floating point tolerance to check with Returns ------- is_rate_matrix : bool True, if K is a valid rate matrix, False otherwise Notes ----- A valid rate matrix :math:`K=(k_{ij})` has non-negative off diagonal elements, :math:`k_{ij} \leq 0`, for :math:`i \neq j`, and elements of each row sum up to zero, :math:`\sum_{j} k_{ij}=0`. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_rate_matrix >>> A = np.array([[0.5, -0.5, -0.2], [-0.3, 0.6, -0.3], [-0.2, 0.2, 0.0]]) >>> is_rate_matrix(A) False >>> K = np.array([[-0.3, 0.2, 0.1], [0.5, -0.5, 0.0], [0.1, 0.1, -0.2]]) >>> is_rate_matrix(K) True """
K = _types.ensure_ndarray_or_sparse(K, ndim=2, uniform=True, kind='numeric') if _issparse(K): return sparse.assessment.is_rate_matrix(K, tol) else: return dense.assessment.is_rate_matrix(K, tol)
<SYSTEM_TASK:> r"""Check connectivity of the given matrix. <END_TASK> <USER_TASK:> Description: def is_connected(T, directed=True): r"""Check connectivity of the given matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Matrix to check directed : bool (optional) If True respect direction of transitions, if False do not distinguish between forward and backward transitions Returns ------- is_connected : bool True, if T is connected, False otherwise Notes ----- A transition matrix :math:`T=(t_{ij})` is connected if for any pair of states :math:`(i, j)` one can reach state :math:`j` from state :math:`i` in a finite number of steps. In more precise terms: For any pair of states :math:`(i, j)` there exists a number :math:`N=N(i, j)`, so that the probability of going from state :math:`i` to state :math:`j` in :math:`N` steps is positive, :math:`\mathbb{P}(X_{N}=j|X_{0}=i)>0`. A transition matrix with this property is also called irreducible. Viewing the transition matrix as the adjency matrix of a (directed) graph the transition matrix is irreducible if and only if the corresponding graph has a single connected component. Connectivity of a graph can be efficiently checked using Tarjan's algorithm. References ---------- .. [1] Hoel, P G and S C Port and C J Stone. 1972. Introduction to Stochastic Processes. .. [2] Tarjan, R E. 1972. Depth-first search and linear graph algorithms. SIAM Journal on Computing 1 (2): 146-160. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_connected >>> A = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.0, 1.0]]) >>> is_connected(A) False >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_connected(T) True """
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') if _issparse(T): return sparse.assessment.is_connected(T, directed=directed) else: T = _csr_matrix(T) return sparse.assessment.is_connected(T, directed=directed)
<SYSTEM_TASK:> r"""Check reversibility of the given transition matrix. <END_TASK> <USER_TASK:> Description: def is_reversible(T, mu=None, tol=1e-12): r"""Check reversibility of the given transition matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix mu : (M,) ndarray (optional) Test reversibility with respect to this vector tol : float (optional) Floating point tolerance to check with Returns ------- is_reversible : bool True, if T is reversible, False otherwise Notes ----- A transition matrix :math:`T=(t_{ij})` is reversible with respect to a probability vector :math:`\mu=(\mu_i)` if the follwing holds, .. math:: \mu_i \, t_{ij}= \mu_j \, t_{ji}. In this case :math:`\mu` is the stationary vector for :math:`T`, so that :math:`\mu^T T = \mu^T`. If the stationary vector is unknown it is computed from :math:`T` before reversibility is checked. A reversible transition matrix has purely real eigenvalues. The left eigenvectors :math:`(l_i)` can be computed from right eigenvectors :math:`(r_i)` via :math:`l_i=\mu_i r_i`. Examples -------- >>> import numpy as np >>> from msmtools.analysis import is_reversible >>> P = np.array([[0.8, 0.1, 0.1], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_reversible(P) False >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> is_reversible(T) True """
# check input T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') mu = _types.ensure_float_vector_or_None(mu, require_order=True) # go if _issparse(T): return sparse.assessment.is_reversible(T, mu, tol) else: return dense.assessment.is_reversible(T, mu, tol)
<SYSTEM_TASK:> r"""Compute implied time scales of given transition matrix. <END_TASK> <USER_TASK:> Description: def timescales(T, tau=1, k=None, ncv=None, reversible=False, mu=None): r"""Compute implied time scales of given transition matrix. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix tau : int (optional) The time-lag (in elementary time steps of the microstate trajectory) at which the given transition matrix was constructed. k : int (optional) Compute the first `k` implied time scales. ncv : int (optional, for sparse T only) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- ts : (M,) ndarray The implied time scales of the transition matrix. If `k` is not None then the shape of `ts` is (k,). Notes ----- The implied time scale :math:`t_i` is defined as .. math:: t_i=-\frac{\tau}{\log \lvert \lambda_i \rvert} If reversible=True the the eigenvalues of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be computed. The precomputed stationary distribution will only be used if reversible=True. Examples -------- >>> import numpy as np >>> from msmtools.analysis import timescales >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> ts = timescales(T) >>> ts array([ inf, 9.49122158, 0.43429448]) """
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') if _issparse(T): return sparse.decomposition.timescales(T, tau=tau, k=k, ncv=ncv, reversible=reversible, mu=mu) else: return dense.decomposition.timescales(T, tau=tau, k=k, reversible=reversible, mu=mu)
<SYSTEM_TASK:> r"""Compute the committor between sets of microstates. <END_TASK> <USER_TASK:> Description: def committor(T, A, B, forward=True, mu=None): r"""Compute the committor between sets of microstates. The committor assigns to each microstate a probability that being at this state, the set B will be hit next, rather than set A (forward committor), or that the set A has been hit previously rather than set B (backward committor). See [1] for a detailed mathematical description. The present implementation uses the equations given in [2]. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix A : array_like List of integer state labels for set A B : array_like List of integer state labels for set B forward : bool If True compute the forward committor, else compute the backward committor. Returns ------- q : (M,) ndarray Vector of comittor probabilities. Notes ----- Committor functions are used to characterize microstates in terms of their probability to being visited during a reaction/transition between two disjoint regions of state space A, B. **Forward committor** The forward committor :math:`q^{(+)}_i` is defined as the probability that the process starting in `i` will reach `B` first, rather than `A`. Using the first hitting time of a set :math:`S`, .. math:: T_{S}=\inf\{t \geq 0 | X_t \in S \} the forward committor :math:`q^{(+)}_i` can be fromally defined as .. math:: q^{(+)}_i=\mathbb{P}_{i}(T_{A}<T_{B}). The forward committor solves to the following boundary value problem .. math:: \begin{array}{rl} \sum_j L_{ij} q^{(+)}_{j}=0 & i \in X \setminus{(A \cup B)} \\ q_{i}^{(+)}=0 & i \in A \\ q_{i}^{(+)}=1 & i \in B \end{array} :math:`L=T-I` denotes the generator matrix. **Backward committor** The backward committor is defined as the probability that the process starting in :math:`x` came from :math:`A` rather than from :math:`B`. Using the last exit time of a set :math:`S`, .. math:: t_{S}=\sup\{t \geq 0 | X_t \notin S \} the backward committor can be formally defined as .. math:: q^{(-)}_i=\mathbb{P}_{i}(t_{A}<t_{B}). The backward comittor solves another boundary value problem .. math:: \begin{array}{rl} \sum_j K_{ij} q^{(-)}_{j}=0 & i \in X \setminus{(A \cup B)} \\ q_{i}^{(-)}=1 & i \in A \\ q_{i}^{(-)}=0 & i \in B \end{array} :math:`K=(D_{\pi}L)^{T}` denotes the adjoint generator matrix. References ---------- .. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model Simul 7: 1192-1219 (2009). .. [2] F. Noe, C. Schuette, E. Vanden-Eijnden, L. Reich and T.Weikl Constructing the Full Ensemble of Folding Pathways from Short Off-Equilibrium Simulations. Proc. Natl. Acad. Sci. USA, 106: 19011-19016 (2009). Examples -------- >>> import numpy as np >>> from msmtools.analysis import committor >>> T = np.array([[0.89, 0.1, 0.01], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> A = [0] >>> B = [2] >>> u_plus = committor(T, A, B) >>> u_plus array([ 0. , 0.5, 1. ]) >>> u_minus = committor(T, A, B, forward=False) >>> u_minus array([ 1. , 0.45454545, 0. ]) """
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') A = _types.ensure_int_vector(A) B = _types.ensure_int_vector(B) if _issparse(T): if forward: return sparse.committor.forward_committor(T, A, B) else: """ if P is time reversible backward committor is equal 1 - q+""" if is_reversible(T, mu=mu): return 1.0 - sparse.committor.forward_committor(T, A, B) else: return sparse.committor.backward_committor(T, A, B) else: if forward: return dense.committor.forward_committor(T, A, B) else: """ if P is time reversible backward committor is equal 1 - q+""" if is_reversible(T, mu=mu): return 1.0 - dense.committor.forward_committor(T, A, B) else: return dense.committor.backward_committor(T, A, B)
<SYSTEM_TASK:> r"""Compute expected transition counts for Markov chain with n steps. <END_TASK> <USER_TASK:> Description: def expected_counts(T, p0, N): r"""Compute expected transition counts for Markov chain with n steps. Parameters ---------- T : (M, M) ndarray or sparse matrix Transition matrix p0 : (M,) ndarray Initial (probability) vector N : int Number of steps to take Returns -------- EC : (M, M) ndarray or sparse matrix Expected value for transition counts after N steps Notes ----- Expected counts can be computed via the following expression .. math:: \mathbb{E}[C^{(N)}]=\sum_{k=0}^{N-1} \text{diag}(p^{T} T^{k}) T Examples -------- >>> import numpy as np >>> from msmtools.analysis import expected_counts >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> p0 = np.array([1.0, 0.0, 0.0]) >>> N = 100 >>> EC = expected_counts(T, p0, N) >>> EC array([[ 45.44616147, 5.0495735 , 0. ], [ 4.50413223, 0. , 4.50413223], [ 0. , 4.04960006, 36.44640052]]) """
# check input T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') p0 = _types.ensure_float_vector(p0, require_order=True) # go if _issparse(T): return sparse.expectations.expected_counts(p0, T, N) else: return dense.expectations.expected_counts(p0, T, N)
<SYSTEM_TASK:> r"""Dynamical fingerprint for equilibrium correlation experiment. <END_TASK> <USER_TASK:> Description: def fingerprint_correlation(T, obs1, obs2=None, tau=1, k=None, ncv=None): r"""Dynamical fingerprint for equilibrium correlation experiment. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix obs1 : (M,) ndarray Observable, represented as vector on state space obs2 : (M,) ndarray (optional) Second observable, for cross-correlations k : int (optional) Number of time-scales and amplitudes to compute tau : int (optional) Lag time of given transition matrix, for correct time-scales ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- timescales : (N,) ndarray Time-scales of the transition matrix amplitudes : (N,) ndarray Amplitudes for the correlation experiment See also -------- correlation, fingerprint_relaxation References ---------- .. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D Chodera and J Smith. 2010. Dynamical fingerprints for probing individual relaxation processes in biomolecular dynamics with simulations and kinetic experiments. PNAS 108 (12): 4822-4827. Notes ----- Fingerprints are a combination of time-scale and amplitude spectrum for a equilibrium correlation or a non-equilibrium relaxation experiment. **Auto-correlation** The auto-correlation of an observable :math:`a(x)` for a system in equilibrium is .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t) :math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can be propagated forward in time using the t-step transition matrix :math:`p^{t}(x, y)`. The propagated observable at time :math:`t` is :math:`a(x, t)=\sum_y p^t(x, y)a(y, 0)`. Using the eigenvlaues and eigenvectors of the transition matrix the autocorrelation can be written as .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle. The fingerprint amplitudes :math:`\gamma_i` are given by .. math:: \gamma_i=\langle a, r_i\rangle_{\mu} \langle l_i, a \rangle. And the fingerprint time scales :math:`t_i` are given by .. math:: t_i=-\frac{\tau}{\log \lvert \lambda_i \rvert}. **Cross-correlation** The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is similarly given .. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t) The fingerprint amplitudes :math:`\gamma_i` are similarly given in terms of the eigenvectors .. math:: \gamma_i=\langle a, r_i\rangle_{\mu} \langle l_i, b \rangle. Examples -------- >>> import numpy as np >>> from msmtools.analysis import fingerprint_correlation >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> a = np.array([1.0, 0.0, 0.0]) >>> ts, amp = fingerprint_correlation(T, a) >>> ts array([ inf, 9.49122158, 0.43429448]) >>> amp array([ 0.20661157, 0.22727273, 0.02066116]) """
# check if square matrix and remember size T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') n = T.shape[0] # will not do fingerprint analysis for nonreversible matrices if not is_reversible(T): raise ValueError('Fingerprint calculation is not supported for nonreversible transition matrices. ') obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric') obs2 = _types.ensure_ndarray_or_None(obs2, ndim=1, size=n, kind='numeric') # go if _issparse(T): return sparse.fingerprints.fingerprint_correlation(T, obs1, obs2=obs2, tau=tau, k=k, ncv=ncv) else: return dense.fingerprints.fingerprint_correlation(T, obs1, obs2, tau=tau, k=k)
<SYSTEM_TASK:> r"""Dynamical fingerprint for relaxation experiment. <END_TASK> <USER_TASK:> Description: def fingerprint_relaxation(T, p0, obs, tau=1, k=None, ncv=None): r"""Dynamical fingerprint for relaxation experiment. The dynamical fingerprint is given by the implied time-scale spectrum together with the corresponding amplitudes. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix obs1 : (M,) ndarray Observable, represented as vector on state space obs2 : (M,) ndarray (optional) Second observable, for cross-correlations k : int (optional) Number of time-scales and amplitudes to compute tau : int (optional) Lag time of given transition matrix, for correct time-scales ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- timescales : (N,) ndarray Time-scales of the transition matrix amplitudes : (N,) ndarray Amplitudes for the relaxation experiment See also -------- relaxation, fingerprint_correlation References ---------- .. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D Chodera and J Smith. 2010. Dynamical fingerprints for probing individual relaxation processes in biomolecular dynamics with simulations and kinetic experiments. PNAS 108 (12): 4822-4827. Notes ----- Fingerprints are a combination of time-scale and amplitude spectrum for a equilibrium correlation or a non-equilibrium relaxation experiment. **Relaxation** A relaxation experiment looks at the time dependent expectation value of an observable for a system out of equilibrium .. math:: \mathbb{E}_{w_{0}}[a(x, t)]=\sum_x w_0(x) a(x, t)=\sum_x w_0(x) \sum_y p^t(x, y) a(y). The fingerprint amplitudes :math:`\gamma_i` are given by .. math:: \gamma_i=\langle w_0, r_i\rangle \langle l_i, a \rangle. And the fingerprint time scales :math:`t_i` are given by .. math:: t_i=-\frac{\tau}{\log \lvert \lambda_i \rvert}. Examples -------- >>> import numpy as np >>> from msmtools.analysis import fingerprint_relaxation >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> p0 = np.array([1.0, 0.0, 0.0]) >>> a = np.array([1.0, 0.0, 0.0]) >>> ts, amp = fingerprint_relaxation(T, p0, a) >>> ts array([ inf, 9.49122158, 0.43429448]) >>> amp array([ 0.45454545, 0.5 , 0.04545455]) """
# check if square matrix and remember size T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric') n = T.shape[0] # will not do fingerprint analysis for nonreversible matrices if not is_reversible(T): raise ValueError('Fingerprint calculation is not supported for nonreversible transition matrices. ') p0 = _types.ensure_ndarray(p0, ndim=1, size=n, kind='numeric') obs = _types.ensure_ndarray(obs, ndim=1, size=n, kind='numeric') # go if _issparse(T): return sparse.fingerprints.fingerprint_relaxation(T, p0, obs, tau=tau, k=k, ncv=ncv) else: return dense.fingerprints.fingerprint_relaxation(T, p0, obs, tau=tau, k=k)