Dataset fields:
repository_name: string (5 to 67 chars)
func_path_in_repository: string (4 to 234 chars)
func_name: string (0 to 314 chars)
whole_func_string: string (52 chars to 3.87M)
language: 6 classes
func_code_string: string (52 chars to 3.87M)
func_documentation_string: string (1 to 47.2k chars)
func_code_url: string (85 to 339 chars)
eandersson/amqpstorm
amqpstorm/basic.py
Basic.ack
def ack(self, delivery_tag=0, multiple=False):
    """Acknowledge Message.

    :param int/long delivery_tag: Server-assigned delivery tag
    :param bool multiple: Acknowledge multiple messages

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not compatibility.is_integer(delivery_tag):
        raise AMQPInvalidArgument('delivery_tag should be an integer')
    elif not isinstance(multiple, bool):
        raise AMQPInvalidArgument('multiple should be a boolean')
    ack_frame = specification.Basic.Ack(delivery_tag=delivery_tag,
                                        multiple=multiple)
    self._channel.write_frame(ack_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L201-L220
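A minimal usage sketch for Basic.ack, assuming a local broker with default guest credentials and a queue named 'simple_queue' (both placeholders): fetch one message with Basic.Get and acknowledge it by its server-assigned delivery tag.

import amqpstorm

connection = amqpstorm.Connection('127.0.0.1', 'guest', 'guest')
channel = connection.channel()
channel.queue.declare('simple_queue')  # assumed queue name

message = channel.basic.get('simple_queue', no_ack=False)
if message:
    print(message.body)
    # Acknowledge using the tag the server assigned to this delivery.
    channel.basic.ack(delivery_tag=message.delivery_tag)

channel.close()
connection.close()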
eandersson/amqpstorm
amqpstorm/basic.py
Basic.nack
def nack(self, delivery_tag=0, multiple=False, requeue=True):
    """Negative Acknowledgement.

    :param int/long delivery_tag: Server-assigned delivery tag
    :param bool multiple: Negative acknowledge multiple messages
    :param bool requeue: Re-queue the message

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not compatibility.is_integer(delivery_tag):
        raise AMQPInvalidArgument('delivery_tag should be an integer')
    elif not isinstance(multiple, bool):
        raise AMQPInvalidArgument('multiple should be a boolean')
    elif not isinstance(requeue, bool):
        raise AMQPInvalidArgument('requeue should be a boolean')
    nack_frame = specification.Basic.Nack(delivery_tag=delivery_tag,
                                          multiple=multiple,
                                          requeue=requeue)
    self._channel.write_frame(nack_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L222-L245
eandersson/amqpstorm
amqpstorm/basic.py
Basic.reject
def reject(self, delivery_tag=0, requeue=True):
    """Reject Message.

    :param int/long delivery_tag: Server-assigned delivery tag
    :param bool requeue: Re-queue the message

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not compatibility.is_integer(delivery_tag):
        raise AMQPInvalidArgument('delivery_tag should be an integer')
    elif not isinstance(requeue, bool):
        raise AMQPInvalidArgument('requeue should be a boolean')
    reject_frame = specification.Basic.Reject(delivery_tag=delivery_tag,
                                              requeue=requeue)
    self._channel.write_frame(reject_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L247-L266
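A companion sketch for the negative dispositions, reusing the channel and queue assumed above; process() and both exception types are hypothetical. nack differs from reject mainly in that it can also acknowledge multiple outstanding deliveries at once.

message = channel.basic.get('simple_queue', no_ack=False)
if message:
    try:
        process(message.body)  # hypothetical application handler
        channel.basic.ack(delivery_tag=message.delivery_tag)
    except RecoverableError:  # hypothetical exception type
        # Put the message back on the queue for another attempt.
        channel.basic.nack(delivery_tag=message.delivery_tag, requeue=True)
    except FatalError:  # hypothetical exception type
        # Discard (or dead-letter) a message that can never succeed.
        channel.basic.reject(delivery_tag=message.delivery_tag,
                             requeue=False)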
eandersson/amqpstorm
amqpstorm/basic.py
Basic._consume_add_and_get_tag
def _consume_add_and_get_tag(self, consume_rpc_result):
    """Add the tag to the channel and return it.

    :param dict consume_rpc_result:

    :rtype: str
    """
    consumer_tag = consume_rpc_result['consumer_tag']
    self._channel.add_consumer_tag(consumer_tag)
    return consumer_tag
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L268-L277
eandersson/amqpstorm
amqpstorm/basic.py
Basic._consume_rpc_request
def _consume_rpc_request(self, arguments, consumer_tag, exclusive, no_ack,
                         no_local, queue):
    """Create a Consume Frame and execute a RPC request.

    :param str queue: Queue name
    :param str consumer_tag: Consumer tag
    :param bool no_local: Do not deliver own messages
    :param bool no_ack: No acknowledgement needed
    :param bool exclusive: Request exclusive access
    :param dict arguments: Consume key/value arguments

    :rtype: dict
    """
    consume_frame = specification.Basic.Consume(queue=queue,
                                                consumer_tag=consumer_tag,
                                                exclusive=exclusive,
                                                no_local=no_local,
                                                no_ack=no_ack,
                                                arguments=arguments)
    return self._channel.rpc_request(consume_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L279-L298
eandersson/amqpstorm
amqpstorm/basic.py
Basic._validate_publish_parameters
def _validate_publish_parameters(body, exchange, immediate, mandatory,
                                 properties, routing_key):
    """Validate Publish Parameters.

    :param bytes|str|unicode body: Message payload
    :param str routing_key: Message routing key
    :param str exchange: The exchange to publish the message to
    :param dict properties: Message properties
    :param bool mandatory: Requires the message is published
    :param bool immediate: Request immediate delivery

    :raises AMQPInvalidArgument: Invalid Parameters

    :return:
    """
    if not compatibility.is_string(body):
        raise AMQPInvalidArgument('body should be a string')
    elif not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    elif not compatibility.is_string(exchange):
        raise AMQPInvalidArgument('exchange should be a string')
    elif properties is not None and not isinstance(properties, dict):
        raise AMQPInvalidArgument('properties should be a dict or None')
    elif not isinstance(mandatory, bool):
        raise AMQPInvalidArgument('mandatory should be a boolean')
    elif not isinstance(immediate, bool):
        raise AMQPInvalidArgument('immediate should be a boolean')
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L301-L327
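This validation is what makes basic.publish fail fast on bad input before any frame is written; a small sketch of the observable behaviour, assuming the open channel from the earlier example.

from amqpstorm.exception import AMQPInvalidArgument

channel.basic.publish(body='hello', routing_key='simple_queue')  # passes

try:
    channel.basic.publish(body=42, routing_key='simple_queue')
except AMQPInvalidArgument as why:
    print(why)  # body should be a string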
eandersson/amqpstorm
amqpstorm/basic.py
Basic._handle_utf8_payload
def _handle_utf8_payload(body, properties):
    """Update the Body and Properties to the appropriate encoding.

    :param bytes|str|unicode body: Message payload
    :param dict properties: Message properties

    :return:
    """
    if 'content_encoding' not in properties:
        properties['content_encoding'] = 'utf-8'
    encoding = properties['content_encoding']
    if compatibility.is_unicode(body):
        body = body.encode(encoding)
    elif compatibility.PYTHON3 and isinstance(body, str):
        body = bytes(body, encoding=encoding)
    return body
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L330-L345
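A standalone illustration of the rule this helper applies (plain Python, not the library call): if the caller set no content_encoding, default it to UTF-8, then encode unicode bodies with whatever encoding the properties name.

body = u'héllo'
properties = {}
if 'content_encoding' not in properties:
    properties['content_encoding'] = 'utf-8'
encoded = body.encode(properties['content_encoding'])
assert encoded == b'h\xc3\xa9llo'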
eandersson/amqpstorm
amqpstorm/basic.py
Basic._get_message
def _get_message(self, get_frame, auto_decode):
    """Get and return a message using a Basic.Get frame.

    :param Basic.Get get_frame:
    :param bool auto_decode: Auto-decode strings when possible.

    :rtype: Message
    """
    message_uuid = self._channel.rpc.register_request(
        get_frame.valid_responses + ['ContentHeader', 'ContentBody']
    )
    try:
        self._channel.write_frame(get_frame)
        get_ok_frame = self._channel.rpc.get_request(message_uuid,
                                                     raw=True,
                                                     multiple=True)
        if isinstance(get_ok_frame, specification.Basic.GetEmpty):
            return None
        content_header = self._channel.rpc.get_request(message_uuid,
                                                       raw=True,
                                                       multiple=True)
        body = self._get_content_body(message_uuid,
                                      content_header.body_size)
    finally:
        self._channel.rpc.remove(message_uuid)
    return Message(channel=self._channel,
                   body=body,
                   method=dict(get_ok_frame),
                   properties=dict(content_header.properties),
                   auto_decode=auto_decode)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L347-L376
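Callers never see this machinery; Basic.get simply returns a Message, or None when the server answers Basic.GetEmpty. Sketch, reusing the channel assumed earlier:

message = channel.basic.get('simple_queue')
if message is None:
    print('queue was empty (Basic.GetEmpty)')
else:
    print(message.properties, message.body)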
eandersson/amqpstorm
amqpstorm/basic.py
Basic._publish_confirm
def _publish_confirm(self, frames_out):
    """Confirm that message was published successfully.

    :param list frames_out:

    :rtype: bool
    """
    confirm_uuid = self._channel.rpc.register_request(['Basic.Ack',
                                                       'Basic.Nack'])
    self._channel.write_frames(frames_out)
    result = self._channel.rpc.get_request(confirm_uuid, raw=True)
    self._channel.check_for_errors()
    if isinstance(result, specification.Basic.Ack):
        return True
    return False
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L378-L392
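With confirms enabled, basic.publish waits on this path and returns a boolean; a sketch assuming the same open channel as above.

channel.confirm_deliveries()
delivered = channel.basic.publish(body='ping',
                                  routing_key='simple_queue')
if not delivered:
    print('broker negatively acknowledged the publish')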
eandersson/amqpstorm
amqpstorm/basic.py
Basic._create_content_body
def _create_content_body(self, body):
    """Split body based on the maximum frame size.

    This function is based on code from Rabbitpy.
    https://github.com/gmr/rabbitpy

    :param bytes|str|unicode body: Message payload

    :rtype: collections.Iterable
    """
    frames = int(math.ceil(len(body) / float(self._max_frame_size)))
    for offset in compatibility.RANGE(0, frames):
        start_frame = self._max_frame_size * offset
        end_frame = start_frame + self._max_frame_size
        body_len = len(body)
        if end_frame > body_len:
            end_frame = body_len
        yield pamqp_body.ContentBody(body[start_frame:end_frame])
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L394-L411
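The arithmetic, stated directly: a body of n bytes is cut into ceil(n / max_frame_size) content frames. A standalone sketch of the same slicing, with an assumed negotiated frame size:

import math

MAX_FRAME_SIZE = 131072  # assumed negotiated value
body = b'x' * 300000

frames = [body[offset:offset + MAX_FRAME_SIZE]
          for offset in range(0, len(body), MAX_FRAME_SIZE)]
assert len(frames) == math.ceil(len(body) / MAX_FRAME_SIZE)  # 3 frames
assert b''.join(frames) == body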
eandersson/amqpstorm
amqpstorm/basic.py
Basic._get_content_body
def _get_content_body(self, message_uuid, body_size):
    """Get Content Body using RPC requests.

    :param str message_uuid: Rpc Identifier.
    :param int body_size: Content Size.

    :rtype: str
    """
    body = bytes()
    while len(body) < body_size:
        body_piece = self._channel.rpc.get_request(message_uuid, raw=True,
                                                   multiple=True)
        if not body_piece.value:
            break
        body += body_piece.value
    return body
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/basic.py#L413-L428
eandersson/amqpstorm
amqpstorm/queue.py
Queue.declare
def declare(self, queue='', passive=False, durable=False,
            exclusive=False, auto_delete=False, arguments=None):
    """Declare a Queue.

    :param str queue: Queue name
    :param bool passive: Do not create
    :param bool durable: Durable queue
    :param bool exclusive: Request exclusive access
    :param bool auto_delete: Automatically delete when not in use
    :param dict arguments: Queue key/value arguments

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_string(queue):
        raise AMQPInvalidArgument('queue should be a string')
    elif not isinstance(passive, bool):
        raise AMQPInvalidArgument('passive should be a boolean')
    elif not isinstance(durable, bool):
        raise AMQPInvalidArgument('durable should be a boolean')
    elif not isinstance(exclusive, bool):
        raise AMQPInvalidArgument('exclusive should be a boolean')
    elif not isinstance(auto_delete, bool):
        raise AMQPInvalidArgument('auto_delete should be a boolean')
    elif arguments is not None and not isinstance(arguments, dict):
        raise AMQPInvalidArgument('arguments should be a dict or None')
    declare_frame = pamqp_queue.Declare(queue=queue, passive=passive,
                                        durable=durable,
                                        exclusive=exclusive,
                                        auto_delete=auto_delete,
                                        arguments=arguments)
    return self._channel.rpc_request(declare_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/queue.py#L18-L55
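The Declare-Ok response comes back as a plain dict; a sketch assuming the connection from the earlier examples (field names follow the AMQP Declare-Ok frame).

result = channel.queue.declare('simple_queue', durable=True)
print(result['queue'],
      result['message_count'],
      result['consumer_count'])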
eandersson/amqpstorm
amqpstorm/queue.py
Queue.delete
def delete(self, queue='', if_unused=False, if_empty=False):
    """Delete a Queue.

    :param str queue: Queue name
    :param bool if_unused: Delete only if unused
    :param bool if_empty: Delete only if empty

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_string(queue):
        raise AMQPInvalidArgument('queue should be a string')
    elif not isinstance(if_unused, bool):
        raise AMQPInvalidArgument('if_unused should be a boolean')
    elif not isinstance(if_empty, bool):
        raise AMQPInvalidArgument('if_empty should be a boolean')
    delete_frame = pamqp_queue.Delete(queue=queue, if_unused=if_unused,
                                      if_empty=if_empty)
    return self._channel.rpc_request(delete_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/queue.py#L57-L80
eandersson/amqpstorm
amqpstorm/queue.py
Queue.purge
def purge(self, queue):
    """Purge a Queue.

    :param str queue: Queue name

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_string(queue):
        raise AMQPInvalidArgument('queue should be a string')
    purge_frame = pamqp_queue.Purge(queue=queue)
    return self._channel.rpc_request(purge_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/queue.py#L82-L99
eandersson/amqpstorm
amqpstorm/queue.py
Queue.bind
def bind(self, queue='', exchange='', routing_key='', arguments=None):
    """Bind a Queue.

    :param str queue: Queue name
    :param str exchange: Exchange name
    :param str routing_key: The routing key to use
    :param dict arguments: Bind key/value arguments

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_string(queue):
        raise AMQPInvalidArgument('queue should be a string')
    elif not compatibility.is_string(exchange):
        raise AMQPInvalidArgument('exchange should be a string')
    elif not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    elif arguments is not None and not isinstance(arguments, dict):
        raise AMQPInvalidArgument('arguments should be a dict or None')
    bind_frame = pamqp_queue.Bind(queue=queue,
                                  exchange=exchange,
                                  routing_key=routing_key,
                                  arguments=arguments)
    return self._channel.rpc_request(bind_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/queue.py#L101-L129
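A typical declare-then-bind pairing; the exchange name is illustrative, and channel.exchange.declare is the sibling Exchange API from the same library.

channel.exchange.declare('events', exchange_type='direct')  # assumed name
channel.queue.declare('simple_queue')
channel.queue.bind(queue='simple_queue',
                   exchange='events',
                   routing_key='simple_queue')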
eandersson/amqpstorm
amqpstorm/queue.py
Queue.unbind
def unbind(self, queue='', exchange='', routing_key='', arguments=None):
    """Unbind a Queue.

    :param str queue: Queue name
    :param str exchange: Exchange name
    :param str routing_key: The routing key used
    :param dict arguments: Unbind key/value arguments

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    if not compatibility.is_string(queue):
        raise AMQPInvalidArgument('queue should be a string')
    elif not compatibility.is_string(exchange):
        raise AMQPInvalidArgument('exchange should be a string')
    elif not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    elif arguments is not None and not isinstance(arguments, dict):
        raise AMQPInvalidArgument('arguments should be a dict or None')
    unbind_frame = pamqp_queue.Unbind(queue=queue,
                                      exchange=exchange,
                                      routing_key=routing_key,
                                      arguments=arguments)
    return self._channel.rpc_request(unbind_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/queue.py#L131-L159
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0.on_frame
def on_frame(self, frame_in):
    """Handle frames sent to Channel0.

    :param frame_in: Amqp frame.

    :return:
    """
    LOGGER.debug('Frame Received: %s', frame_in.name)
    if frame_in.name == 'Heartbeat':
        return
    elif frame_in.name == 'Connection.Close':
        self._close_connection(frame_in)
    elif frame_in.name == 'Connection.CloseOk':
        self._close_connection_ok()
    elif frame_in.name == 'Connection.Blocked':
        self._blocked_connection(frame_in)
    elif frame_in.name == 'Connection.Unblocked':
        self._unblocked_connection()
    elif frame_in.name == 'Connection.OpenOk':
        self._set_connection_state(Stateful.OPEN)
    elif frame_in.name == 'Connection.Start':
        self.server_properties = frame_in.server_properties
        self._send_start_ok(frame_in)
    elif frame_in.name == 'Connection.Tune':
        self._send_tune_ok(frame_in)
        self._send_open_connection()
    else:
        LOGGER.error('[Channel0] Unhandled Frame: %s', frame_in.name)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L33-L59
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._close_connection
def _close_connection(self, frame_in):
    """Connection Close.

    :param specification.Connection.Close frame_in: Amqp frame.

    :return:
    """
    self._set_connection_state(Stateful.CLOSED)
    if frame_in.reply_code != 200:
        reply_text = try_utf8_decode(frame_in.reply_text)
        message = (
            'Connection was closed by remote server: %s' % reply_text
        )
        exception = AMQPConnectionError(message,
                                        reply_code=frame_in.reply_code)
        self._connection.exceptions.append(exception)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L77-L91
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._blocked_connection
def _blocked_connection(self, frame_in):
    """Connection is Blocked.

    :param frame_in:

    :return:
    """
    self.is_blocked = True
    LOGGER.warning(
        'Connection is blocked by remote server: %s',
        try_utf8_decode(frame_in.reason)
    )
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L100-L110
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._send_start_ok
def _send_start_ok(self, frame_in):
    """Send Start OK frame.

    :param specification.Connection.Start frame_in: Amqp frame.

    :return:
    """
    mechanisms = try_utf8_decode(frame_in.mechanisms)
    if 'EXTERNAL' in mechanisms:
        mechanism = 'EXTERNAL'
        credentials = '\0\0'
    elif 'PLAIN' in mechanisms:
        mechanism = 'PLAIN'
        credentials = self._plain_credentials()
    else:
        exception = AMQPConnectionError(
            'Unsupported Security Mechanism(s): %s' % frame_in.mechanisms
        )
        self._connection.exceptions.append(exception)
        return
    start_ok_frame = specification.Connection.StartOk(
        mechanism=mechanism,
        client_properties=self._client_properties(),
        response=credentials,
        locale=LOCALE
    )
    self._write_frame(start_ok_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L140-L166
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._send_tune_ok
def _send_tune_ok(self, frame_in):
    """Send Tune OK frame.

    :param specification.Connection.Tune frame_in: Tune frame.

    :return:
    """
    self.max_allowed_channels = self._negotiate(frame_in.channel_max,
                                                MAX_CHANNELS)
    self.max_frame_size = self._negotiate(frame_in.frame_max,
                                          MAX_FRAME_SIZE)
    LOGGER.debug(
        'Negotiated max frame size %d, max channels %d',
        self.max_frame_size, self.max_allowed_channels
    )
    tune_ok_frame = specification.Connection.TuneOk(
        channel_max=self.max_allowed_channels,
        frame_max=self.max_frame_size,
        heartbeat=self._heartbeat)
    self._write_frame(tune_ok_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L168-L188
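The _negotiate helper is not shown in this section; under the usual AMQP convention it treats 0 as "no limit" and otherwise takes the lower of the server's value and the client's maximum. A standalone sketch of that assumed rule:

def negotiate(server_value, client_maximum):
    # 0 from the server means "no limit"; otherwise take the lower bound.
    return min(server_value, client_maximum) or client_maximum

assert negotiate(0, 65535) == 65535
assert negotiate(2047, 65535) == 2047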
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._send_open_connection
def _send_open_connection(self):
    """Send Open Connection frame.

    :return:
    """
    open_frame = specification.Connection.Open(
        virtual_host=self._parameters['virtual_host']
    )
    self._write_frame(open_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L190-L198
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._write_frame
def _write_frame(self, frame_out):
    """Write a pamqp frame from Channel0.

    :param frame_out: Amqp frame.

    :return:
    """
    self._connection.write_frame(0, frame_out)
    LOGGER.debug('Frame Sent: %s', frame_out.name)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L208-L215
eandersson/amqpstorm
amqpstorm/channel0.py
Channel0._client_properties
def _client_properties():
    """AMQPStorm Client Properties.

    :rtype: dict
    """
    return {
        'product': 'AMQPStorm',
        'platform': 'Python %s (%s)' % (platform.python_version(),
                                        platform.python_implementation()),
        'capabilities': {
            'basic.nack': True,
            'connection.blocked': True,
            'publisher_confirms': True,
            'consumer_cancel_notify': True,
            'authentication_failure_close': True,
        },
        'information': 'See https://github.com/eandersson/amqpstorm',
        'version': __version__
    }
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel0.py#L218-L236
eandersson/amqpstorm
amqpstorm/channel.py
Channel.build_inbound_messages
def build_inbound_messages(self, break_on_empty=False, to_tuple=False,
                           auto_decode=True):
    """Build messages in the inbound queue.

    :param bool break_on_empty: Should we break the loop when there are
                                no more messages in our inbound queue.
                                This does not guarantee that the
                                upstream queue is empty, as it's
                                possible that if messages are consumed
                                faster than delivered, the inbound queue
                                will then be emptied and the consumption
                                will be broken.
    :param bool to_tuple: Should incoming messages be converted to a
                          tuple before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: :py:class:`generator`
    """
    self.check_for_errors()
    while not self.is_closed:
        message = self._build_message(auto_decode=auto_decode)
        if not message:
            self.check_for_errors()
            sleep(IDLE_WAIT)
            if break_on_empty and not self._inbound:
                break
            continue
        if to_tuple:
            yield message.to_tuple()
            continue
        yield message
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L97-L131
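Generator usage: with break_on_empty=True the loop drains whatever Basic.Consume has already buffered locally and then returns, rather than blocking forever. Queue name as assumed in the earlier examples.

channel.basic.consume(queue='simple_queue', no_ack=False)
for message in channel.build_inbound_messages(break_on_empty=True):
    message.ack()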
eandersson/amqpstorm
amqpstorm/channel.py
Channel.close
def close(self, reply_code=200, reply_text=''):
    """Close Channel.

    :param int reply_code: Close reply code (e.g. 200)
    :param str reply_text: Close reply text

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not compatibility.is_integer(reply_code):
        raise AMQPInvalidArgument('reply_code should be an integer')
    elif not compatibility.is_string(reply_text):
        raise AMQPInvalidArgument('reply_text should be a string')
    try:
        if self._connection.is_closed or self.is_closed:
            self.stop_consuming()
            LOGGER.debug('Channel #%d forcefully Closed', self.channel_id)
            return
        self.set_state(self.CLOSING)
        LOGGER.debug('Channel #%d Closing', self.channel_id)
        try:
            self.stop_consuming()
        except AMQPChannelError:
            self.remove_consumer_tag()
        self.rpc_request(specification.Channel.Close(
            reply_code=reply_code,
            reply_text=reply_text),
            connection_adapter=self._connection
        )
    finally:
        if self._inbound:
            del self._inbound[:]
        self.set_state(self.CLOSED)
        if self._on_close_impl:
            self._on_close_impl(self.channel_id)
        LOGGER.debug('Channel #%d Closed', self.channel_id)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L133-L172
eandersson/amqpstorm
amqpstorm/channel.py
Channel.check_for_errors
def check_for_errors(self):
    """Check connection and channel for errors.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    try:
        self._connection.check_for_errors()
    except AMQPConnectionError:
        self.set_state(self.CLOSED)
        raise
    if self.exceptions:
        exception = self.exceptions[0]
        if self.is_open:
            self.exceptions.pop(0)
        raise exception
    if self.is_closed:
        raise AMQPChannelError('channel was closed')
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L174-L195
eandersson/amqpstorm
amqpstorm/channel.py
Channel.confirm_deliveries
def confirm_deliveries(self):
    """Set the channel to confirm that each message has been
    successfully delivered.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    self._confirming_deliveries = True
    confirm_frame = specification.Confirm.Select()
    return self.rpc_request(confirm_frame)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L197-L209
eandersson/amqpstorm
amqpstorm/channel.py
Channel.on_frame
def on_frame(self, frame_in):
    """Handle frame sent to this specific channel.

    :param pamqp.Frame frame_in: Amqp frame.

    :return:
    """
    if self.rpc.on_frame(frame_in):
        return
    if frame_in.name in CONTENT_FRAME:
        self._inbound.append(frame_in)
    elif frame_in.name == 'Basic.Cancel':
        self._basic_cancel(frame_in)
    elif frame_in.name == 'Basic.CancelOk':
        self.remove_consumer_tag(frame_in.consumer_tag)
    elif frame_in.name == 'Basic.ConsumeOk':
        self.add_consumer_tag(frame_in['consumer_tag'])
    elif frame_in.name == 'Basic.Return':
        self._basic_return(frame_in)
    elif frame_in.name == 'Channel.Close':
        self._close_channel(frame_in)
    elif frame_in.name == 'Channel.Flow':
        self.write_frame(specification.Channel.FlowOk(frame_in.active))
    else:
        LOGGER.error(
            '[Channel%d] Unhandled Frame: %s -- %s',
            self.channel_id, frame_in.name, dict(frame_in)
        )
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L219-L246
eandersson/amqpstorm
amqpstorm/channel.py
Channel.open
def open(self):
    """Open Channel.

    :return:
    """
    self._inbound = []
    self._exceptions = []
    self.set_state(self.OPENING)
    self.rpc_request(specification.Channel.Open())
    self.set_state(self.OPEN)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L248-L257
eandersson/amqpstorm
amqpstorm/channel.py
Channel.process_data_events
def process_data_events(self, to_tuple=False, auto_decode=True):
    """Consume inbound messages.

    :param bool to_tuple: Should incoming messages be converted to a
                          tuple before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not self._consumer_callbacks:
        raise AMQPChannelError('no consumer callback defined')
    for message in self.build_inbound_messages(break_on_empty=True,
                                               auto_decode=auto_decode):
        consumer_tag = message._method.get('consumer_tag')
        if to_tuple:
            # noinspection PyCallingNonCallable
            self._consumer_callbacks[consumer_tag](*message.to_tuple())
            continue
        # noinspection PyCallingNonCallable
        self._consumer_callbacks[consumer_tag](message)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L259-L282
eandersson/amqpstorm
amqpstorm/channel.py
Channel.rpc_request
def rpc_request(self, frame_out, connection_adapter=None):
    """Perform a RPC Request.

    :param specification.Frame frame_out: Amqp frame.

    :rtype: dict
    """
    with self.rpc.lock:
        uuid = self.rpc.register_request(frame_out.valid_responses)
        self._connection.write_frame(self.channel_id, frame_out)
        return self.rpc.get_request(
            uuid, connection_adapter=connection_adapter
        )
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L284-L295
eandersson/amqpstorm
amqpstorm/channel.py
Channel.start_consuming
def start_consuming(self, to_tuple=False, auto_decode=True):
    """Start consuming messages.

    :param bool to_tuple: Should incoming messages be converted to a
                          tuple before delivery.
    :param bool auto_decode: Auto-decode strings when possible.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    while not self.is_closed:
        self.process_data_events(
            to_tuple=to_tuple,
            auto_decode=auto_decode
        )
        if self.consumer_tags:
            sleep(IDLE_WAIT)
            continue
        break
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L297-L318
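The standard blocking consumer loop built on process_data_events and the methods above; the callback and queue name are illustrative.

def on_message(message):
    print(message.body)
    message.ack()

channel.basic.consume(on_message, 'simple_queue', no_ack=False)
try:
    channel.start_consuming()
except KeyboardInterrupt:
    channel.stop_consuming()
    channel.close()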
eandersson/amqpstorm
amqpstorm/channel.py
Channel.stop_consuming
def stop_consuming(self):
    """Stop consuming messages.

    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :return:
    """
    if not self.consumer_tags:
        return
    if not self.is_closed:
        for tag in self.consumer_tags:
            self.basic.cancel(tag)
    self.remove_consumer_tag()
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L320-L334
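Because start_consuming() blocks, stop_consuming() is typically invoked from another thread; a sketch under the same illustrative connection details:

import threading
import time

import amqpstorm

connection = amqpstorm.Connection('localhost', 'guest', 'guest')
channel = connection.channel()
channel.basic.consume(lambda message: message.ack(), 'simple_queue')

# Run the blocking loop on a worker thread and stop it from here.
worker = threading.Thread(target=channel.start_consuming)
worker.start()
time.sleep(5)
channel.stop_consuming()  # cancels the consumer; start_consuming returns
worker.join()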
eandersson/amqpstorm
amqpstorm/channel.py
Channel.write_frame
def write_frame(self, frame_out):
    """Write a pamqp frame from the current channel.

    :param specification.Frame frame_out: A single pamqp frame.

    :return:
    """
    self.check_for_errors()
    self._connection.write_frame(self.channel_id, frame_out)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L336-L344
eandersson/amqpstorm
amqpstorm/channel.py
Channel.write_frames
def write_frames(self, frames_out):
    """Write multiple pamqp frames from the current channel.

    :param list frames_out: A list of pamqp frames.

    :return:
    """
    self.check_for_errors()
    self._connection.write_frames(self.channel_id, frames_out)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L346-L354
eandersson/amqpstorm
amqpstorm/channel.py
Channel._basic_cancel
def _basic_cancel(self, frame_in):
    """Handle a Basic Cancel frame.

    :param specification.Basic.Cancel frame_in: Amqp frame.

    :return:
    """
    LOGGER.warning(
        'Received Basic.Cancel on consumer_tag: %s',
        try_utf8_decode(frame_in.consumer_tag)
    )
    self.remove_consumer_tag(frame_in.consumer_tag)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L356-L367
eandersson/amqpstorm
amqpstorm/channel.py
Channel._basic_return
def _basic_return(self, frame_in):
    """Handle a Basic Return Frame and treat it as an error.

    :param specification.Basic.Return frame_in: Amqp frame.

    :return:
    """
    reply_text = try_utf8_decode(frame_in.reply_text)
    message = (
        "Message not delivered: %s (%s) to queue '%s' from exchange '%s'"
        % (
            reply_text,
            frame_in.reply_code,
            frame_in.routing_key,
            frame_in.exchange
        )
    )
    exception = AMQPMessageError(message, reply_code=frame_in.reply_code)
    self.exceptions.append(exception)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L369-L388
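Since _basic_return only appends an AMQPMessageError, the failure surfaces on a later error check; a sketch of how an unroutable mandatory publish might be observed (queue name and connection details are illustrative):

import amqpstorm
from amqpstorm import AMQPMessageError

connection = amqpstorm.Connection('localhost', 'guest', 'guest')
channel = connection.channel()
channel.confirm_deliveries()
try:
    # An unroutable mandatory publish makes the broker send Basic.Return.
    channel.basic.publish('hello', routing_key='no_such_queue',
                          mandatory=True)
    # The stored exception is raised by the next error check.
    channel.check_for_errors()
except AMQPMessageError as why:
    print('message was returned:', why)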
eandersson/amqpstorm
amqpstorm/channel.py
Channel._build_message
def _build_message(self, auto_decode):
    """Fetch and build a complete Message from the inbound queue.

    :param bool auto_decode: Auto-decode strings when possible.

    :rtype: Message
    """
    with self.lock:
        if len(self._inbound) < 2:
            return None
        headers = self._build_message_headers()
        if not headers:
            return None
        basic_deliver, content_header = headers
        body = self._build_message_body(content_header.body_size)
    message = Message(channel=self,
                      body=body,
                      method=dict(basic_deliver),
                      properties=dict(content_header.properties),
                      auto_decode=auto_decode)
    return message
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L390-L411
eandersson/amqpstorm
amqpstorm/channel.py
Channel._build_message_headers
def _build_message_headers(self):
    """Fetch Message Headers (Deliver & Header Frames).

    :rtype: tuple|None
    """
    basic_deliver = self._inbound.pop(0)
    if not isinstance(basic_deliver, specification.Basic.Deliver):
        LOGGER.warning(
            'Received an out-of-order frame: %s was '
            'expecting a Basic.Deliver frame',
            type(basic_deliver)
        )
        return None
    content_header = self._inbound.pop(0)
    if not isinstance(content_header, ContentHeader):
        LOGGER.warning(
            'Received an out-of-order frame: %s was '
            'expecting a ContentHeader frame',
            type(content_header)
        )
        return None
    return basic_deliver, content_header
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L413-L435
eandersson/amqpstorm
amqpstorm/channel.py
Channel._build_message_body
def _build_message_body(self, body_size):
    """Build the Message body from the inbound queue.

    :rtype: bytes
    """
    body = bytes()
    while len(body) < body_size:
        if not self._inbound:
            self.check_for_errors()
            sleep(IDLE_WAIT)
            continue
        body_piece = self._inbound.pop(0)
        if not body_piece.value:
            break
        body += body_piece.value
    return body
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L437-L452
eandersson/amqpstorm
amqpstorm/channel.py
Channel._close_channel
def _close_channel(self, frame_in):
    """Close Channel.

    :param specification.Channel.Close frame_in: Channel Close frame.

    :return:
    """
    if frame_in.reply_code != 200:
        reply_text = try_utf8_decode(frame_in.reply_text)
        message = (
            'Channel %d was closed by remote server: %s'
            % (
                self._channel_id,
                reply_text
            )
        )
        exception = AMQPChannelError(message,
                                     reply_code=frame_in.reply_code)
        self.exceptions.append(exception)
    self.set_state(self.CLOSED)
    if self._connection.is_open:
        try:
            self._connection.write_frame(
                self.channel_id, specification.Channel.CloseOk()
            )
        except AMQPConnectionError:
            pass
    self.close()
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L454-L481
eandersson/amqpstorm
amqpstorm/management/user.py
User.create
def create(self, username, password, tags=''):
    """Create User.

    :param str username: Username
    :param str password: Password
    :param str tags: Comma-separated list of tags (e.g. monitoring)

    :rtype: None
    """
    user_payload = json.dumps({
        'password': password,
        'tags': tags
    })
    return self.http_client.put(API_USER % username,
                                payload=user_payload)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L28-L42
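A usage sketch for User.create via the management client; the endpoint URL and credentials are assumptions for a local broker:

from amqpstorm.management import ManagementApi

api = ManagementApi('http://localhost:15672', 'guest', 'guest')
api.user.create('monitoring_user', 'secret-password', tags='monitoring')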
eandersson/amqpstorm
amqpstorm/management/user.py
User.get_permission
def get_permission(self, username, virtual_host):
    """Get User permissions for the configured virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    return self.http_client.get(API_USER_VIRTUAL_HOST_PERMISSIONS % (
        virtual_host,
        username
    ))
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L53-L69
eandersson/amqpstorm
amqpstorm/management/user.py
User.set_permission
def set_permission(self, username, virtual_host, configure_regex='.*',
                   write_regex='.*', read_regex='.*'):
    """Set User permissions for the configured virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name
    :param str configure_regex: Permission pattern for configuration
                                operations for this user.
    :param str write_regex: Permission pattern for write operations
                            for this user.
    :param str read_regex: Permission pattern for read operations
                           for this user.

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    permission_payload = json.dumps({
        "configure": configure_regex,
        "read": read_regex,
        "write": write_regex
    })
    return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS % (
        virtual_host,
        username
    ), payload=permission_payload)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L86-L115
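And granting that user access on the default virtual host, again with illustrative names:

from amqpstorm.management import ManagementApi

api = ManagementApi('http://localhost:15672', 'guest', 'guest')
# '.*' on all three patterns grants full configure/write/read access.
api.user.set_permission('monitoring_user', '/',
                        configure_regex='.*',
                        write_regex='.*',
                        read_regex='.*')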
eandersson/amqpstorm
amqpstorm/management/user.py
User.delete_permission
def delete_permission(self, username, virtual_host):
    """Delete User permissions for the configured virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    return self.http_client.delete(
        API_USER_VIRTUAL_HOST_PERMISSIONS % (
            virtual_host,
            username
        ))
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/user.py#L117-L134
eandersson/amqpstorm
examples/scalable_rpc_server.py
ScalableRpcServer.start_server
def start_server(self):
    """Start the RPC Server.

    :return:
    """
    self._stopped.clear()
    if not self._connection or self._connection.is_closed:
        self._create_connection()
    while not self._stopped.is_set():
        try:
            # Check our connection for errors.
            self._connection.check_for_errors()
            self._update_consumers()
        except amqpstorm.AMQPError as why:
            # If an error occurs, re-connect and let update_consumers
            # re-open the channels.
            LOGGER.warning(why)
            self._stop_consumers()
            self._create_connection()
        time.sleep(1)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/examples/scalable_rpc_server.py#L41-L60
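A sketch of driving this server; the constructor keywords are assumptions inferred from the attributes referenced above (rpc_queue, number_of_consumers), not confirmed from the example's source:

server = ScalableRpcServer(hostname='localhost',    # assumed keyword
                           username='guest',        # assumed keyword
                           password='guest',        # assumed keyword
                           rpc_queue='rpc_queue',
                           number_of_consumers=5)
server.start_server()  # blocks until the stop event is set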
eandersson/amqpstorm
examples/scalable_rpc_server.py
ScalableRpcServer._update_consumers
def _update_consumers(self):
    """Update Consumers.

    - Add more if requested.
    - Make sure the consumers are healthy.
    - Remove excess consumers.

    :return:
    """
    # Do we need to start more consumers.
    consumer_to_start = \
        min(max(self.number_of_consumers - len(self._consumers), 0), 2)
    for _ in range(consumer_to_start):
        consumer = Consumer(self.rpc_queue)
        self._start_consumer(consumer)
        self._consumers.append(consumer)

    # Check that all our consumers are active.
    for consumer in self._consumers:
        if consumer.active:
            continue
        self._start_consumer(consumer)
        break

    # Do we have any overflow of consumers.
    self._stop_consumers(self.number_of_consumers)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/examples/scalable_rpc_server.py#L112-L137
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.get
def get(self, path, payload=None, headers=None):
    """HTTP GET operation.

    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :return: Response
    """
    return self._request('get', path, payload, headers)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L17-L29
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.post
def post(self, path, payload=None, headers=None):
    """HTTP POST operation.

    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :return: Response
    """
    return self._request('post', path, payload, headers)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L31-L43
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.delete
def delete(self, path, payload=None, headers=None):
    """HTTP DELETE operation.

    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :return: Response
    """
    return self._request('delete', path, payload, headers)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L45-L57
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient.put
def put(self, path, payload=None, headers=None):
    """HTTP PUT operation.

    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :return: Response
    """
    return self._request('put', path, payload, headers)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L59-L71
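The four verbs are thin wrappers over _request (shown next); a direct-use sketch, assuming the constructor takes (api_url, username, password) as the management client passes them:

from amqpstorm.management.http_client import HTTPClient

# Assumed constructor signature; verify against the class definition
# before relying on it.
client = HTTPClient('http://localhost:15672', 'guest', 'guest')
overview = client.get('overview')  # GET /api/overview
print(overview.get('rabbitmq_version'))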
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient._request
def _request(self, method, path, payload=None, headers=None):
    """HTTP operation.

    :param method: Operation type (e.g. post)
    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :return: Response
    """
    url = urlparse.urljoin(self._base_url, 'api/%s' % path)
    headers = headers or {}
    headers['content-type'] = 'application/json'
    try:
        response = requests.request(
            method, url,
            auth=self._auth,
            data=payload,
            headers=headers,
            cert=self._cert,
            verify=self._verify,
            timeout=self._timeout
        )
    except requests.RequestException as why:
        raise ApiConnectionError(str(why))
    json_response = self._get_json_output(response)
    self._check_for_errors(response, json_response)
    return json_response
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L73-L104
eandersson/amqpstorm
amqpstorm/management/http_client.py
HTTPClient._check_for_errors
def _check_for_errors(response, json_response):
    """Check payload for errors.

    :param response: HTTP response
    :param json_response: JSON response

    :raises ApiError: Raises if the remote server encountered an error.

    :return:
    """
    status_code = response.status_code
    try:
        response.raise_for_status()
    except requests.HTTPError as why:
        raise ApiError(str(why), reply_code=status_code)
    if isinstance(json_response, dict) and 'error' in json_response:
        raise ApiError(json_response['error'], reply_code=status_code)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/http_client.py#L121-L137
eandersson/amqpstorm
amqpstorm/management/healthchecks.py
HealthChecks.get
def get(self, node=None):
    """Run basic healthchecks against the current node, or against a
    given node.

    Example response:
    > {"status":"ok"}
    > {"status":"failed","reason":"string"}

    :param node: Node name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    if not node:
        return self.http_client.get(HEALTHCHECKS)
    return self.http_client.get(HEALTHCHECKS_NODE % node)
python
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/healthchecks.py#L8-L25
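A sketch of calling the healthcheck through the management client, assuming it is exposed as the healthchecks attribute; connection details are illustrative:

from amqpstorm.management import ManagementApi

api = ManagementApi('http://localhost:15672', 'guest', 'guest')
result = api.healthchecks.get()
if result['status'] != 'ok':
    print('node unhealthy:', result.get('reason'))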
cggh/scikit-allel
allel/stats/diversity.py
mean_pairwise_difference
def mean_pairwise_difference(ac, an=None, fill=np.nan):
    """Calculate for each variant the mean number of pairwise differences
    between chromosomes sampled from within a single population.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    an : array_like, int, shape (n_variants,), optional
        Allele numbers. If not provided, will be calculated from `ac`.
    fill : float
        Use this value where there are no pairs to compare (e.g.,
        all allele calls are missing).

    Returns
    -------
    mpd : ndarray, float, shape (n_variants,)

    Notes
    -----
    The values returned by this function can be summed over a genome
    region and divided by the number of accessible bases to estimate
    nucleotide diversity, a.k.a. *pi*.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
    ...                           [0, 0, 0, 1],
    ...                           [0, 0, 1, 1],
    ...                           [0, 1, 1, 1],
    ...                           [1, 1, 1, 1],
    ...                           [0, 0, 1, 2],
    ...                           [0, 1, 1, 2],
    ...                           [0, 1, -1, -1]])
    >>> ac = h.count_alleles()
    >>> allel.mean_pairwise_difference(ac)
    array([0.        , 0.5       , 0.66666667, 0.5       , 0.        ,
           0.83333333, 0.83333333, 1.        ])

    See Also
    --------
    sequence_diversity, windowed_diversity

    """

    # This function calculates the mean number of pairwise differences
    # between haplotypes within a single population, generalising to any
    # number of alleles.

    # check inputs
    ac = asarray_ndim(ac, 2)

    # total number of haplotypes
    if an is None:
        an = np.sum(ac, axis=1)
    else:
        an = asarray_ndim(an, 1)
        check_dim0_aligned(ac, an)

    # total number of pairwise comparisons for each variant:
    # (an choose 2)
    n_pairs = an * (an - 1) / 2

    # number of pairwise comparisons where there is no difference:
    # sum of (ac choose 2) for each allele (i.e., number of ways to
    # choose the same allele twice)
    n_same = np.sum(ac * (ac - 1) / 2, axis=1)

    # number of pairwise differences
    n_diff = n_pairs - n_same

    # mean number of pairwise differences, accounting for cases where
    # there are no pairs
    with ignore_invalid():
        mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill)

    return mpd
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L22-L104
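A short worked example of the relationship described in the Notes section: summing the per-variant values over a region and dividing by the number of accessible bases (10 here, an arbitrary assumption) gives a pi estimate:

import numpy as np
import allel

h = allel.HaplotypeArray([[0, 0, 0, 0],
                          [0, 0, 0, 1],
                          [0, 0, 1, 1]])
ac = h.count_alleles()
mpd = allel.mean_pairwise_difference(ac, fill=0)
pi_estimate = np.sum(mpd) / 10  # assumed 10 accessible bases
print(pi_estimate)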
cggh/scikit-allel
allel/stats/diversity.py
mean_pairwise_difference_between
def mean_pairwise_difference_between(ac1, ac2, an1=None, an2=None,
                                     fill=np.nan):
    """Calculate for each variant the mean number of pairwise differences
    between chromosomes sampled from two different populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    an1 : array_like, int, shape (n_variants,), optional
        Allele numbers for the first population. If not provided, will be
        calculated from `ac1`.
    an2 : array_like, int, shape (n_variants,), optional
        Allele numbers for the second population. If not provided, will be
        calculated from `ac2`.
    fill : float
        Use this value where there are no pairs to compare (e.g.,
        all allele calls are missing).

    Returns
    -------
    mpd : ndarray, float, shape (n_variants,)

    Notes
    -----
    The values returned by this function can be summed over a genome
    region and divided by the number of accessible bases to estimate
    nucleotide divergence between two populations, a.k.a. *Dxy*.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
    ...                           [0, 0, 0, 1],
    ...                           [0, 0, 1, 1],
    ...                           [0, 1, 1, 1],
    ...                           [1, 1, 1, 1],
    ...                           [0, 0, 1, 2],
    ...                           [0, 1, 1, 2],
    ...                           [0, 1, -1, -1]])
    >>> ac1 = h.count_alleles(subpop=[0, 1])
    >>> ac2 = h.count_alleles(subpop=[2, 3])
    >>> allel.mean_pairwise_difference_between(ac1, ac2)
    array([0.  , 0.5 , 1.  , 0.5 , 0.  , 1.  , 0.75,  nan])

    See Also
    --------
    sequence_divergence, windowed_divergence

    """

    # This function calculates the mean number of pairwise differences
    # between haplotypes from two different populations, generalising to
    # any number of alleles.

    # check inputs
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    check_dim0_aligned(ac1, ac2)
    ac1, ac2 = ensure_dim1_aligned(ac1, ac2)

    # total number of haplotypes sampled from each population
    if an1 is None:
        an1 = np.sum(ac1, axis=1)
    else:
        an1 = asarray_ndim(an1, 1)
        check_dim0_aligned(ac1, an1)
    if an2 is None:
        an2 = np.sum(ac2, axis=1)
    else:
        an2 = asarray_ndim(an2, 1)
        check_dim0_aligned(ac2, an2)

    # total number of pairwise comparisons for each variant
    n_pairs = an1 * an2

    # number of pairwise comparisons where there is no difference:
    # sum of (ac1 * ac2) for each allele (i.e., number of ways to
    # choose the same allele twice)
    n_same = np.sum(ac1 * ac2, axis=1)

    # number of pairwise differences
    n_diff = n_pairs - n_same

    # mean number of pairwise differences, accounting for cases where
    # there are no pairs
    with ignore_invalid():
        mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill)

    return mpd
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L107-L203
cggh/scikit-allel
allel/stats/diversity.py
sequence_diversity
def sequence_diversity(pos, ac, start=None, stop=None,
                       is_accessible=None):
    """Estimate nucleotide diversity within a given region, which is the
    average proportion of sites (including monomorphic sites not present
    in the data) that differ between randomly chosen pairs of chromosomes.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions
        in the chromosome/contig.

    Returns
    -------
    pi : float
        Nucleotide diversity.

    Notes
    -----
    If start and/or stop are not provided, uses the difference between
    the last and the first position as a proxy for the total number of
    sites, which can overestimate the sequence diversity.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> pi = allel.sequence_diversity(pos, ac, start=1, stop=31)
    >>> pi
    0.13978494623655915

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    ac = asarray_ndim(ac, 2)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # deal with subregion
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # sum differences over variants
    mpd_sum = np.sum(mpd)

    # calculate value per base
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    pi = mpd_sum / n_bases
    return pi
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L206-L290
cggh/scikit-allel
allel/stats/diversity.py
sequence_divergence
def sequence_divergence(pos, ac1, ac2, an1=None, an2=None, start=None,
                        stop=None, is_accessible=None):
    """Estimate nucleotide divergence between two populations within a
    given region, which is the average proportion of sites (including
    monomorphic sites not present in the data) that differ between
    randomly chosen pairs of chromosomes, one from each population.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    an1 : array_like, int, shape (n_variants,), optional
        Allele numbers for the first population. If not provided, will be
        calculated from `ac1`.
    an2 : array_like, int, shape (n_variants,), optional
        Allele numbers for the second population. If not provided, will
        be calculated from `ac2`.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions
        in the chromosome/contig.

    Returns
    -------
    Dxy : float
        Nucleotide divergence.

    Examples
    --------
    Simplest case, two haplotypes in each population::

        >>> import allel
        >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
        ...                           [0, 0, 0, 1],
        ...                           [0, 0, 1, 1],
        ...                           [0, 1, 1, 1],
        ...                           [1, 1, 1, 1],
        ...                           [0, 0, 1, 2],
        ...                           [0, 1, 1, 2],
        ...                           [0, 1, -1, -1],
        ...                           [-1, -1, -1, -1]])
        >>> ac1 = h.count_alleles(subpop=[0, 1])
        >>> ac2 = h.count_alleles(subpop=[2, 3])
        >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
        >>> dxy = allel.sequence_divergence(pos, ac1, ac2, start=1,
        ...                                 stop=31)
        >>> dxy
        0.12096774193548387

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    if an1 is not None:
        an1 = asarray_ndim(an1, 1)
    if an2 is not None:
        an2 = asarray_ndim(an2, 1)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # handle start/stop
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac1 = ac1[loc]
        ac2 = ac2[loc]
        if an1 is not None:
            an1 = an1[loc]
        if an2 is not None:
            an2 = an2[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # calculate mean pairwise difference between the two populations
    mpd = mean_pairwise_difference_between(ac1, ac2, an1=an1, an2=an2,
                                           fill=0)

    # sum differences over variants
    mpd_sum = np.sum(mpd)

    # calculate value per base, N.B., expect pos is 1-based
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    dxy = mpd_sum / n_bases

    return dxy
python
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L293-L393
cggh/scikit-allel
allel/stats/diversity.py
windowed_diversity
def windowed_diversity(pos, ac, size=None, start=None, stop=None,
                       step=None, windows=None, is_accessible=None,
                       fill=np.nan):
    """Estimate nucleotide diversity in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of
        (window_start, window_stop) positions, using 1-based
        coordinates. Overrides the size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions
        in the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    pi : ndarray, float, shape (n_windows,)
        Nucleotide diversity in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> pi, windows, n_bases, counts = allel.windowed_diversity(
    ...     pos, ac, size=10, start=1, stop=31
    ... )
    >>> pi
    array([0.11666667, 0.21666667, 0.09090909])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # sum differences in windows
    mpd_sum, windows, counts = windowed_statistic(
        pos, values=mpd, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    pi, n_bases = per_base(mpd_sum, windows,
                           is_accessible=is_accessible, fill=fill)

    return pi, windows, n_bases, counts
python
def windowed_diversity(pos, ac, size=None, start=None, stop=None, step=None,
                       windows=None, is_accessible=None, fill=np.nan):
    """Estimate nucleotide diversity in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    pi : ndarray, float, shape (n_windows,)
        Nucleotide diversity in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------

    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> pi, windows, n_bases, counts = allel.windowed_diversity(
    ...     pos, ac, size=10, start=1, stop=31
    ... )
    >>> pi
    array([0.11666667, 0.21666667, 0.09090909])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # sum differences in windows
    mpd_sum, windows, counts = windowed_statistic(
        pos, values=mpd, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    pi, n_bases = per_base(mpd_sum, windows, is_accessible=is_accessible,
                           fill=fill)

    return pi, windows, n_bases, counts
Estimate nucleotide diversity in windows over a single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
ac : array_like, int, shape (n_variants, n_alleles)
    Allele counts array.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given, defaults
    to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in the
    chromosome/contig.
fill : object, optional
    The value to use where a window is completely inaccessible.

Returns
-------
pi : ndarray, float, shape (n_windows,)
    Nucleotide diversity in each window.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop) positions,
    using 1-based coordinates.
n_bases : ndarray, int, shape (n_windows,)
    Number of (accessible) bases in each window.
counts : ndarray, int, shape (n_windows,)
    Number of variants in each window.

Examples
--------

>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
...                          [[0, 0], [0, 1]],
...                          [[0, 0], [1, 1]],
...                          [[0, 1], [1, 1]],
...                          [[1, 1], [1, 1]],
...                          [[0, 0], [1, 2]],
...                          [[0, 1], [1, 2]],
...                          [[0, 1], [-1, -1]],
...                          [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
>>> pi, windows, n_bases, counts = allel.windowed_diversity(
...     pos, ac, size=10, start=1, stop=31
... )
>>> pi
array([0.11666667, 0.21666667, 0.09090909])
>>> windows
array([[ 1, 10],
       [11, 20],
       [21, 31]])
>>> n_bases
array([10, 10, 11])
>>> counts
array([3, 4, 2])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L396-L489
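The window values in the example can be verified by hand. Below is a minimal
numpy sketch assuming the standard mean pairwise difference formula for allele
counts c over n chromosomes (the library computes this via
mean_pairwise_difference); the allele counts are those produced by the example
genotypes.

import numpy as np

# for allele counts c over n chromosomes: mpd = 1 - sum(c*(c-1)) / (n*(n-1))
ac = np.array([[4, 0, 0], [3, 1, 0], [2, 2, 0], [1, 3, 0], [0, 4, 0],
               [2, 1, 1], [1, 2, 1], [1, 1, 0], [0, 0, 0]])
n = ac.sum(axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
    mpd = 1 - np.sum(ac * (ac - 1), axis=1) / (n * (n - 1))
mpd = np.nan_to_num(mpd)  # all-missing variants contribute 0

# the window [1, 10] holds the variants at positions 2, 4 and 7
print(mpd[:3].sum() / 10)  # 0.11666666..., matching pi[0] in the example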
cggh/scikit-allel
allel/stats/diversity.py
windowed_divergence
def windowed_divergence(pos, ac1, ac2, size=None, start=None, stop=None,
                        step=None, windows=None, is_accessible=None,
                        fill=np.nan):
    """Estimate nucleotide divergence between two populations in windows
    over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    Dxy : ndarray, float, shape (n_windows,)
        Nucleotide divergence in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    Simplest case, two haplotypes in each population::

        >>> import allel
        >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
        ...                           [0, 0, 0, 1],
        ...                           [0, 0, 1, 1],
        ...                           [0, 1, 1, 1],
        ...                           [1, 1, 1, 1],
        ...                           [0, 0, 1, 2],
        ...                           [0, 1, 1, 2],
        ...                           [0, 1, -1, -1],
        ...                           [-1, -1, -1, -1]])
        >>> ac1 = h.count_alleles(subpop=[0, 1])
        >>> ac2 = h.count_alleles(subpop=[2, 3])
        >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
        >>> dxy, windows, n_bases, counts = allel.windowed_divergence(
        ...     pos, ac1, ac2, size=10, start=1, stop=31
        ... )
        >>> dxy
        array([0.15 , 0.225, 0.   ])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 31]])
        >>> n_bases
        array([10, 10, 11])
        >>> counts
        array([3, 4, 2])

    """

    # check inputs
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # calculate mean pairwise divergence
    mpd = mean_pairwise_difference_between(ac1, ac2, fill=0)

    # sum in windows
    mpd_sum, windows, counts = windowed_statistic(
        pos, values=mpd, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    dxy, n_bases = per_base(mpd_sum, windows, is_accessible=is_accessible,
                            fill=fill)

    return dxy, windows, n_bases, counts
python
def windowed_divergence(pos, ac1, ac2, size=None, start=None, stop=None,
                        step=None, windows=None, is_accessible=None,
                        fill=np.nan):
    """Estimate nucleotide divergence between two populations in windows
    over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    Dxy : ndarray, float, shape (n_windows,)
        Nucleotide divergence in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    Simplest case, two haplotypes in each population::

        >>> import allel
        >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
        ...                           [0, 0, 0, 1],
        ...                           [0, 0, 1, 1],
        ...                           [0, 1, 1, 1],
        ...                           [1, 1, 1, 1],
        ...                           [0, 0, 1, 2],
        ...                           [0, 1, 1, 2],
        ...                           [0, 1, -1, -1],
        ...                           [-1, -1, -1, -1]])
        >>> ac1 = h.count_alleles(subpop=[0, 1])
        >>> ac2 = h.count_alleles(subpop=[2, 3])
        >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
        >>> dxy, windows, n_bases, counts = allel.windowed_divergence(
        ...     pos, ac1, ac2, size=10, start=1, stop=31
        ... )
        >>> dxy
        array([0.15 , 0.225, 0.   ])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 31]])
        >>> n_bases
        array([10, 10, 11])
        >>> counts
        array([3, 4, 2])

    """

    # check inputs
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # calculate mean pairwise divergence
    mpd = mean_pairwise_difference_between(ac1, ac2, fill=0)

    # sum in windows
    mpd_sum, windows, counts = windowed_statistic(
        pos, values=mpd, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    dxy, n_bases = per_base(mpd_sum, windows, is_accessible=is_accessible,
                            fill=fill)

    return dxy, windows, n_bases, counts
Estimate nucleotide divergence between two populations in windows over a
single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the second population.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given, defaults
    to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in the
    chromosome/contig.
fill : object, optional
    The value to use where a window is completely inaccessible.

Returns
-------
Dxy : ndarray, float, shape (n_windows,)
    Nucleotide divergence in each window.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop) positions,
    using 1-based coordinates.
n_bases : ndarray, int, shape (n_windows,)
    Number of (accessible) bases in each window.
counts : ndarray, int, shape (n_windows,)
    Number of variants in each window.

Examples
--------
Simplest case, two haplotypes in each population::

    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
    ...                           [0, 0, 0, 1],
    ...                           [0, 0, 1, 1],
    ...                           [0, 1, 1, 1],
    ...                           [1, 1, 1, 1],
    ...                           [0, 0, 1, 2],
    ...                           [0, 1, 1, 2],
    ...                           [0, 1, -1, -1],
    ...                           [-1, -1, -1, -1]])
    >>> ac1 = h.count_alleles(subpop=[0, 1])
    >>> ac2 = h.count_alleles(subpop=[2, 3])
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> dxy, windows, n_bases, counts = allel.windowed_divergence(
    ...     pos, ac1, ac2, size=10, start=1, stop=31
    ... )
    >>> dxy
    array([0.15 , 0.225, 0.   ])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L492-L590
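The windowed values are just the per-variant between-population differences
summed within each window and divided by the window length. A small sketch,
reusing the per-variant values derived in the sequence_divergence sketch above:

import numpy as np

# per-variant between-population differences and variant positions from
# the docstring example
mpd = np.array([0.0, 0.5, 1.0, 0.5, 0.0, 1.0, 0.75, 0.0, 0.0])
pos = np.array([2, 4, 7, 14, 15, 18, 19, 25, 27])

# window [1, 10] spans 10 bases and contains the first three variants
in_win = (pos >= 1) & (pos <= 10)
print(mpd[in_win].sum() / 10)  # 0.15, matching dxy[0] in the example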
cggh/scikit-allel
allel/stats/diversity.py
windowed_df
def windowed_df(pos, ac1, ac2, size=None, start=None, stop=None, step=None,
                windows=None, is_accessible=None, fill=np.nan):
    """Calculate the density of fixed differences between two populations in
    windows over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    df : ndarray, float, shape (n_windows,)
        Per-base density of fixed differences in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    See Also
    --------
    allel.model.locate_fixed_differences

    """

    # check inputs
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # locate fixed differences
    loc_df = locate_fixed_differences(ac1, ac2)

    # count number of fixed differences in windows
    n_df, windows, counts = windowed_statistic(
        pos, values=loc_df, statistic=np.count_nonzero, size=size,
        start=start, stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    df, n_bases = per_base(n_df, windows, is_accessible=is_accessible,
                           fill=fill)

    return df, windows, n_bases, counts
python
def windowed_df(pos, ac1, ac2, size=None, start=None, stop=None, step=None,
                windows=None, is_accessible=None, fill=np.nan):
    """Calculate the density of fixed differences between two populations in
    windows over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    df : ndarray, float, shape (n_windows,)
        Per-base density of fixed differences in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    See Also
    --------
    allel.model.locate_fixed_differences

    """

    # check inputs
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # locate fixed differences
    loc_df = locate_fixed_differences(ac1, ac2)

    # count number of fixed differences in windows
    n_df, windows, counts = windowed_statistic(
        pos, values=loc_df, statistic=np.count_nonzero, size=size,
        start=start, stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    df, n_bases = per_base(n_df, windows, is_accessible=is_accessible,
                           fill=fill)

    return df, windows, n_bases, counts
Calculate the density of fixed differences between two populations in
windows over a single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the second population.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given, defaults
    to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in the
    chromosome/contig.
fill : object, optional
    The value to use where a window is completely inaccessible.

Returns
-------
df : ndarray, float, shape (n_windows,)
    Per-base density of fixed differences in each window.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop) positions,
    using 1-based coordinates.
n_bases : ndarray, int, shape (n_windows,)
    Number of (accessible) bases in each window.
counts : ndarray, int, shape (n_windows,)
    Number of variants in each window.

See Also
--------
allel.model.locate_fixed_differences
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L593-L663
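The See Also section points at locate_fixed_differences. Below is a simplified
numpy sketch of the idea, assuming a fixed difference means the two populations
share no alleles at a site; the library's implementation may differ in details
such as missing-data handling, so treat this as an illustration only. The
allele counts are hypothetical.

import numpy as np

# hypothetical allele counts for three variants in two populations
ac1 = np.array([[2, 0], [0, 2], [1, 1]])
ac2 = np.array([[0, 2], [0, 2], [2, 0]])

# a variant is a candidate fixed difference when no allele is present
# in both populations at the same time
shared = np.any((ac1 > 0) & (ac2 > 0), axis=1)
print(~shared)  # [ True False False]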
cggh/scikit-allel
allel/stats/diversity.py
watterson_theta
def watterson_theta(pos, ac, start=None, stop=None, is_accessible=None):
    """Calculate the value of Watterson's estimator over a given region.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    theta_hat_w : float
        Watterson's estimator (theta hat per base).

    Examples
    --------

    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
    >>> theta_hat_w
    0.10557184750733138

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # count segregating variants
    S = ac.count_segregating()

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate absolute value
    theta_hat_w_abs = S / a1

    # calculate value per base
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    theta_hat_w = theta_hat_w_abs / n_bases

    return theta_hat_w
python
def watterson_theta(pos, ac, start=None, stop=None, is_accessible=None):
    """Calculate the value of Watterson's estimator over a given region.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.

    Returns
    -------
    theta_hat_w : float
        Watterson's estimator (theta hat per base).

    Examples
    --------

    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
    >>> theta_hat_w
    0.10557184750733138

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if start is not None or stop is not None:
        loc = pos.locate_range(start, stop)
        pos = pos[loc]
        ac = ac[loc]
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]

    # count segregating variants
    S = ac.count_segregating()

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate absolute value
    theta_hat_w_abs = S / a1

    # calculate value per base
    if is_accessible is None:
        n_bases = stop - start + 1
    else:
        n_bases = np.count_nonzero(is_accessible[start-1:stop])

    theta_hat_w = theta_hat_w_abs / n_bases

    return theta_hat_w
Calculate the value of Watterson's estimator over a given region.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
ac : array_like, int, shape (n_variants, n_alleles)
    Allele counts array.
start : int, optional
    The position at which to start (1-based). Defaults to the first position.
stop : int, optional
    The position at which to stop (1-based). Defaults to the last position.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in the
    chromosome/contig.

Returns
-------
theta_hat_w : float
    Watterson's estimator (theta hat per base).

Examples
--------

>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
...                          [[0, 0], [0, 1]],
...                          [[0, 0], [1, 1]],
...                          [[0, 1], [1, 1]],
...                          [[1, 1], [1, 1]],
...                          [[0, 0], [1, 2]],
...                          [[0, 1], [1, 2]],
...                          [[0, 1], [-1, -1]],
...                          [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
>>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
>>> theta_hat_w
0.10557184750733138
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L667-L749
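Watterson's estimator reduces to S / a1 normalized by region length. A minimal
numpy sketch with values read off the example above (6 of the 9 variants are
segregating, 4 chromosomes are sampled, and the region spans positions 1..31):

import numpy as np

S = 6   # segregating sites in the region
n = 4   # chromosomes sampled
L = 31  # region length in (accessible) bases

a1 = np.sum(1 / np.arange(1, n))  # harmonic number 1 + 1/2 + 1/3
print(S / a1 / L)  # 0.10557184750733138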
cggh/scikit-allel
allel/stats/diversity.py
windowed_watterson_theta
def windowed_watterson_theta(pos, ac, size=None, start=None, stop=None,
                             step=None, windows=None, is_accessible=None,
                             fill=np.nan):
    """Calculate the value of Watterson's estimator in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    theta_hat_w : ndarray, float, shape (n_windows,)
        Watterson's estimator (theta hat per base).
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------

    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w, windows, n_bases, counts = allel.windowed_watterson_theta(
    ...     pos, ac, size=10, start=1, stop=31
    ... )
    >>> theta_hat_w
    array([0.10909091, 0.16363636, 0.04958678])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])

    """
    # flake8: noqa

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # locate segregating variants
    is_seg = ac.is_segregating()

    # count segregating variants in windows
    S, windows, counts = windowed_statistic(pos, is_seg,
                                            statistic=np.count_nonzero,
                                            size=size, start=start,
                                            stop=stop, step=step,
                                            windows=windows, fill=0)

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # absolute value of Watterson's theta
    theta_hat_w_abs = S / a1

    # theta per base
    theta_hat_w, n_bases = per_base(theta_hat_w_abs, windows=windows,
                                    is_accessible=is_accessible, fill=fill)

    return theta_hat_w, windows, n_bases, counts
python
def windowed_watterson_theta(pos, ac, size=None, start=None, stop=None,
                             step=None, windows=None, is_accessible=None,
                             fill=np.nan):
    """Calculate the value of Watterson's estimator in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    theta_hat_w : ndarray, float, shape (n_windows,)
        Watterson's estimator (theta hat per base).
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------

    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> theta_hat_w, windows, n_bases, counts = allel.windowed_watterson_theta(
    ...     pos, ac, size=10, start=1, stop=31
    ... )
    >>> theta_hat_w
    array([0.10909091, 0.16363636, 0.04958678])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 31]])
    >>> n_bases
    array([10, 10, 11])
    >>> counts
    array([3, 4, 2])

    """
    # flake8: noqa

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # locate segregating variants
    is_seg = ac.is_segregating()

    # count segregating variants in windows
    S, windows, counts = windowed_statistic(pos, is_seg,
                                            statistic=np.count_nonzero,
                                            size=size, start=start,
                                            stop=stop, step=step,
                                            windows=windows, fill=0)

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # absolute value of Watterson's theta
    theta_hat_w_abs = S / a1

    # theta per base
    theta_hat_w, n_bases = per_base(theta_hat_w_abs, windows=windows,
                                    is_accessible=is_accessible, fill=fill)

    return theta_hat_w, windows, n_bases, counts
Calculate the value of Watterson's estimator in windows over a single
chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
ac : array_like, int, shape (n_variants, n_alleles)
    Allele counts array.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given, defaults
    to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in the
    chromosome/contig.
fill : object, optional
    The value to use where a window is completely inaccessible.

Returns
-------
theta_hat_w : ndarray, float, shape (n_windows,)
    Watterson's estimator (theta hat per base).
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop) positions,
    using 1-based coordinates.
n_bases : ndarray, int, shape (n_windows,)
    Number of (accessible) bases in each window.
counts : ndarray, int, shape (n_windows,)
    Number of variants in each window.

Examples
--------

>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
...                          [[0, 0], [0, 1]],
...                          [[0, 0], [1, 1]],
...                          [[0, 1], [1, 1]],
...                          [[1, 1], [1, 1]],
...                          [[0, 0], [1, 2]],
...                          [[0, 1], [1, 2]],
...                          [[0, 1], [-1, -1]],
...                          [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
>>> theta_hat_w, windows, n_bases, counts = allel.windowed_watterson_theta(
...     pos, ac, size=10, start=1, stop=31
... )
>>> theta_hat_w
array([0.10909091, 0.16363636, 0.04958678])
>>> windows
array([[ 1, 10],
       [11, 20],
       [21, 31]])
>>> n_bases
array([10, 10, 11])
>>> counts
array([3, 4, 2])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L753-L859
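The per-window values in the example follow from the same S / a1 / L formula
applied window by window. A short sketch, where the per-window segregating-site
counts (2, 3 and 1) are read off the example data:

import numpy as np

a1 = 1 + 1 / 2 + 1 / 3            # harmonic constant for n=4 chromosomes
S = np.array([2, 3, 1])           # segregating sites per window
n_bases = np.array([10, 10, 11])  # lengths of windows [1,10], [11,20], [21,31]
print(S / a1 / n_bases)           # [0.10909091 0.16363636 0.04958678]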
cggh/scikit-allel
allel/stats/diversity.py
tajima_d
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):
    """Calculate the value of Tajima's D over a given region.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    pos : array_like, int, shape (n_items,), optional
        Variant positions, using 1-based coordinates, in ascending order.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : float

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> allel.tajima_d(ac)
    3.1445848780213814
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
    3.8779735196179366

    """

    # check inputs
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if pos is not None and (start is not None or stop is not None):
        if not isinstance(pos, SortedIndex):
            pos = SortedIndex(pos, copy=False)
        loc = pos.locate_range(start, stop)
        ac = ac[loc]

    # count segregating variants
    S = ac.count_segregating()
    if S < min_sites:
        return np.nan

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate Watterson's theta (absolute value)
    theta_hat_w_abs = S / a1

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # calculate theta_hat pi (sum differences over variants)
    theta_hat_pi_abs = np.sum(mpd)

    # N.B., both theta estimates are usually divided by the number of
    # (accessible) bases but here we want the absolute difference
    d = theta_hat_pi_abs - theta_hat_w_abs

    # calculate the denominator (standard deviation)
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    # finally calculate Tajima's D
    D = d / d_stdev

    return D
python
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):
    """Calculate the value of Tajima's D over a given region.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    pos : array_like, int, shape (n_items,), optional
        Variant positions, using 1-based coordinates, in ascending order.
    start : int, optional
        The position at which to start (1-based). Defaults to the first
        position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last
        position.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : float

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> allel.tajima_d(ac)
    3.1445848780213814
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
    3.8779735196179366

    """

    # check inputs
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if pos is not None and (start is not None or stop is not None):
        if not isinstance(pos, SortedIndex):
            pos = SortedIndex(pos, copy=False)
        loc = pos.locate_range(start, stop)
        ac = ac[loc]

    # count segregating variants
    S = ac.count_segregating()
    if S < min_sites:
        return np.nan

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate Watterson's theta (absolute value)
    theta_hat_w_abs = S / a1

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # calculate theta_hat pi (sum differences over variants)
    theta_hat_pi_abs = np.sum(mpd)

    # N.B., both theta estimates are usually divided by the number of
    # (accessible) bases but here we want the absolute difference
    d = theta_hat_pi_abs - theta_hat_w_abs

    # calculate the denominator (standard deviation)
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    # finally calculate Tajima's D
    D = d / d_stdev

    return D
Calculate the value of Tajima's D over a given region.

Parameters
----------
ac : array_like, int, shape (n_variants, n_alleles)
    Allele counts array.
pos : array_like, int, shape (n_items,), optional
    Variant positions, using 1-based coordinates, in ascending order.
start : int, optional
    The position at which to start (1-based). Defaults to the first position.
stop : int, optional
    The position at which to stop (1-based). Defaults to the last position.
min_sites : int, optional
    Minimum number of segregating sites for which to calculate a value. If
    there are fewer, np.nan is returned. Defaults to 3.

Returns
-------
D : float

Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
...                          [[0, 0], [0, 1]],
...                          [[0, 0], [1, 1]],
...                          [[0, 1], [1, 1]],
...                          [[1, 1], [1, 1]],
...                          [[0, 0], [1, 2]],
...                          [[0, 1], [1, 2]],
...                          [[0, 1], [-1, -1]],
...                          [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> allel.tajima_d(ac)
3.1445848780213814
>>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
>>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
3.8779735196179366
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L863-L954
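The docstring value can be reproduced with the constants defined in the code.
Below is a self-contained numpy sketch, assuming theta_pi summed from the
per-variant mean pairwise differences of the example data
(0 + 1/2 + 2/3 + 1/2 + 0 + 5/6 + 5/6 + 1 = 13/3) and S = 6 segregating sites
over n = 4 chromosomes:

import numpy as np

S, n, theta_pi = 6, 4, 13 / 3

a1 = np.sum(1 / np.arange(1, n))
a2 = np.sum(1 / np.arange(1, n) ** 2)
b1 = (n + 1) / (3 * (n - 1))
b2 = 2 * (n ** 2 + n + 3) / (9 * n * (n - 1))
c1 = b1 - 1 / a1
c2 = b2 - (n + 2) / (a1 * n) + a2 / a1 ** 2
e1 = c1 / a1
e2 = c2 / (a1 ** 2 + a2)

# difference between the two theta estimates, standardized
D = (theta_pi - S / a1) / np.sqrt(e1 * S + e2 * S * (S - 1))
print(D)  # 3.1445848780213814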
cggh/scikit-allel
allel/stats/diversity.py
windowed_tajima_d
def windowed_tajima_d(pos, ac, size=None, start=None, stop=None, step=None,
                      windows=None, min_sites=3):
    """Calculate the value of Tajima's D in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : ndarray, float, shape (n_windows,)
        Tajima's D.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 20, 22, 25, 27]
    >>> D, windows, counts = allel.windowed_tajima_d(pos, ac, size=20, step=10, start=1, stop=31)
    >>> D
    array([1.36521524, 4.22566622])
    >>> windows
    array([[ 1, 20],
           [11, 31]])
    >>> counts
    array([6, 6])

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # calculate constants
    a1 = np.sum(1 / np.arange(1, n))
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)

    # locate segregating variants
    is_seg = ac.is_segregating()

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # define statistic to compute for each window
    # noinspection PyPep8Naming
    def statistic(w_is_seg, w_mpd):
        S = np.count_nonzero(w_is_seg)
        if S < min_sites:
            return np.nan
        pi = np.sum(w_mpd)
        d = pi - (S / a1)
        d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))
        wD = d / d_stdev
        return wD

    D, windows, counts = windowed_statistic(pos, values=(is_seg, mpd),
                                            statistic=statistic, size=size,
                                            start=start, stop=stop,
                                            step=step, windows=windows,
                                            fill=np.nan)

    return D, windows, counts
python
def windowed_tajima_d(pos, ac, size=None, start=None, stop=None, step=None,
                      windows=None, min_sites=3):
    """Calculate the value of Tajima's D in windows over a single
    chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : ndarray, float, shape (n_windows,)
        Tajima's D.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> pos = [2, 4, 7, 14, 15, 20, 22, 25, 27]
    >>> D, windows, counts = allel.windowed_tajima_d(pos, ac, size=20, step=10, start=1, stop=31)
    >>> D
    array([1.36521524, 4.22566622])
    >>> windows
    array([[ 1, 20],
           [11, 31]])
    >>> counts
    array([6, 6])

    """

    # check inputs
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # calculate constants
    a1 = np.sum(1 / np.arange(1, n))
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)

    # locate segregating variants
    is_seg = ac.is_segregating()

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # define statistic to compute for each window
    # noinspection PyPep8Naming
    def statistic(w_is_seg, w_mpd):
        S = np.count_nonzero(w_is_seg)
        if S < min_sites:
            return np.nan
        pi = np.sum(w_mpd)
        d = pi - (S / a1)
        d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))
        wD = d / d_stdev
        return wD

    D, windows, counts = windowed_statistic(pos, values=(is_seg, mpd),
                                            statistic=statistic, size=size,
                                            start=start, stop=stop,
                                            step=step, windows=windows,
                                            fill=np.nan)

    return D, windows, counts
Calculate the value of Tajima's D in windows over a single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
ac : array_like, int, shape (n_variants, n_alleles)
    Allele counts array.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given, defaults
    to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.
min_sites : int, optional
    Minimum number of segregating sites for which to calculate a value. If
    there are fewer, np.nan is returned. Defaults to 3.

Returns
-------
D : ndarray, float, shape (n_windows,)
    Tajima's D.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop) positions,
    using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
    Number of variants in each window.

Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
...                          [[0, 0], [0, 1]],
...                          [[0, 0], [1, 1]],
...                          [[0, 1], [1, 1]],
...                          [[1, 1], [1, 1]],
...                          [[0, 0], [1, 2]],
...                          [[0, 1], [1, 2]],
...                          [[0, 1], [-1, -1]],
...                          [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> pos = [2, 4, 7, 14, 15, 20, 22, 25, 27]
>>> D, windows, counts = allel.windowed_tajima_d(pos, ac, size=20, step=10, start=1, stop=31)
>>> D
array([1.36521524, 4.22566622])
>>> windows
array([[ 1, 20],
       [11, 31]])
>>> counts
array([6, 6])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L958-L1064
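The first window value can be checked against the tajima_d arithmetic. A
self-contained sketch; the window contents (S = 4 segregating sites among the
variants at positions 2, 4, 7, 14, 15 and 20, with mean pairwise differences
summing to 2.5) are read off the example data:

import numpy as np

# constants for n=4 chromosomes, as in the tajima_d sketch above
n = 4
a1 = np.sum(1 / np.arange(1, n))
a2 = np.sum(1 / np.arange(1, n) ** 2)
c1 = (n + 1) / (3 * (n - 1)) - 1 / a1
c2 = (2 * (n ** 2 + n + 3) / (9 * n * (n - 1))
      - (n + 2) / (a1 * n) + a2 / a1 ** 2)
e1, e2 = c1 / a1, c2 / (a1 ** 2 + a2)

# window [1, 20]
S, theta_pi = 4, 2.5
print((theta_pi - S / a1) / np.sqrt(e1 * S + e2 * S * (S - 1)))
# ~1.36521524, matching D[0] in the docstring example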
cggh/scikit-allel
allel/stats/diversity.py
moving_tajima_d
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
    """Calculate the value of Tajima's D in moving windows of `size`
    variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Tajima's D.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> D = allel.moving_tajima_d(ac, size=4, step=2)
    >>> D
    array([0.1676558 , 2.01186954, 5.70029703])

    """

    d = moving_statistic(values=ac, statistic=tajima_d, size=size,
                         start=start, stop=stop, step=step,
                         min_sites=min_sites)
    return d
python
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
    """Calculate the value of Tajima's D in moving windows of `size`
    variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value.
        If there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Tajima's D.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> D = allel.moving_tajima_d(ac, size=4, step=2)
    >>> D
    array([0.1676558 , 2.01186954, 5.70029703])

    """

    d = moving_statistic(values=ac, statistic=tajima_d, size=size,
                         start=start, stop=stop, step=step,
                         min_sites=min_sites)
    return d
Calculate the value of Tajima's D in moving windows of `size` variants.

Parameters
----------
ac : array_like, int, shape (n_variants, n_alleles)
    Allele counts array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The number of variants between start positions of windows. If not given,
    defaults to the window size, i.e., non-overlapping windows.
min_sites : int, optional
    Minimum number of segregating sites for which to calculate a value. If
    there are fewer, np.nan is returned. Defaults to 3.

Returns
-------
d : ndarray, float, shape (n_windows,)
    Tajima's D.

Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
...                          [[0, 0], [0, 1]],
...                          [[0, 0], [1, 1]],
...                          [[0, 1], [1, 1]],
...                          [[1, 1], [1, 1]],
...                          [[0, 0], [1, 2]],
...                          [[0, 1], [1, 2]],
...                          [[0, 1], [-1, -1]],
...                          [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> D = allel.moving_tajima_d(ac, size=4, step=2)
>>> D
array([0.1676558 , 2.01186954, 5.70029703])
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L1067-L1115
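Unlike the windowed functions above, this one windows by variant index rather
than genomic position, so the same values can be obtained by slicing the allele
counts directly. A sketch assuming scikit-allel is installed, with the allele
counts that the docstring example produces:

import allel

ac = allel.AlleleCountsArray([[4, 0, 0], [3, 1, 0], [2, 2, 0],
                              [1, 3, 0], [0, 4, 0], [2, 1, 1],
                              [1, 2, 1], [1, 1, 0], [0, 0, 0]])

# size=4, step=2 slides over variant indices, covering rows [0:4],
# [2:6] and [4:8]; the trailing partial window is dropped
print([allel.tajima_d(ac[i:i + 4]) for i in (0, 2, 4)])
# [0.1676558..., 2.0118695..., 5.7002970...]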
cggh/scikit-allel
allel/stats/roh.py
roh_mhmm
def roh_mhmm(gv, pos, phet_roh=0.001, phet_nonroh=(0.0025, 0.01),
             transition=1e-6, min_roh=0, is_accessible=None,
             contig_size=None):
    """Call ROH (runs of homozygosity) in a single individual given a
    genotype vector.

    This function computes the likely ROH using a Multinomial HMM model.
    There are 3 observable states at each position in a chromosome/contig:
    0 = Hom, 1 = Het, 2 = inaccessible (i.e., unobserved).

    The model is provided with a probability of observing a het in a ROH
    (`phet_roh`) and one or more probabilities of observing a het in a
    non-ROH, as this probability may not be constant across the genome
    (`phet_nonroh`).

    Parameters
    ----------
    gv : array_like, int, shape (n_variants, ploidy)
        Genotype vector.
    pos: array_like, int, shape (n_variants,)
        Positions of variants, same 0th dimension as `gv`.
    phet_roh: float, optional
        Probability of observing a heterozygote in a ROH. Appropriate
        values will depend on de novo mutation rate and genotype error rate.
    phet_nonroh: tuple of floats, optional
        One or more probabilities of observing a heterozygote outside of
        ROH. Appropriate values will depend primarily on nucleotide
        diversity within the population, but also on mutation rate and
        genotype error rate.
    transition: float, optional
        Probability of moving between states.
    min_roh: integer, optional
        Minimum size (bp) to consider as a ROH. Will depend on contig size
        and recombination rate.
    is_accessible: array_like, bool, shape (`contig_size`,), optional
        Boolean array for each position in contig describing whether
        accessible or not.
    contig_size: int, optional
        If `is_accessible` is not known/not provided, allows specification
        of total length of contig.

    Returns
    -------
    df_roh: DataFrame
        Data frame where each row describes a run of homozygosity. Columns
        are 'start', 'stop', 'length' and 'is_marginal'. Start and stop are
        1-based, stop-inclusive.
    froh: float
        Proportion of genome in a ROH.

    Notes
    -----
    This function requires `hmmlearn
    <http://hmmlearn.readthedocs.io/en/latest/>`_ to be installed.

    This function currently requires around 4GB memory for a contig size
    of ~50Mbp.

    """

    from hmmlearn import hmm

    # setup inputs
    if isinstance(phet_nonroh, float):
        phet_nonroh = phet_nonroh,
    gv = GenotypeVector(gv)
    pos = asarray_ndim(pos, 1)
    check_dim0_aligned(gv, pos)
    is_accessible = asarray_ndim(is_accessible, 1, dtype=bool)

    # heterozygote probabilities
    het_px = np.concatenate([(phet_roh,), phet_nonroh])

    # start probabilities (all equal)
    start_prob = np.repeat(1/het_px.size, het_px.size)

    # transition between underlying states
    transition_mx = _hmm_derive_transition_matrix(transition, het_px.size)

    # probability of inaccessible
    if is_accessible is None:
        if contig_size is None:
            raise ValueError(
                "If is_accessible argument is not provided, you must "
                "provide contig_size")
        p_accessible = 1.0
    else:
        p_accessible = is_accessible.mean()
        contig_size = is_accessible.size

    emission_mx = _mhmm_derive_emission_matrix(het_px, p_accessible)

    # initialize HMM
    roh_hmm = hmm.MultinomialHMM(n_components=het_px.size)
    roh_hmm.n_symbols_ = 3
    roh_hmm.startprob_ = start_prob
    roh_hmm.transmat_ = transition_mx
    roh_hmm.emissionprob_ = emission_mx

    # locate heterozygous calls
    is_het = gv.is_het()

    # predict ROH state
    pred, obs = _mhmm_predict_roh_state(roh_hmm, is_het, pos,
                                        is_accessible, contig_size)

    # find ROH windows
    df_blocks = tabulate_state_blocks(pred, states=list(range(len(het_px))))
    df_roh = df_blocks[(df_blocks.state == 0)].reset_index(drop=True)

    # adapt the dataframe for ROH
    for col in 'state', 'support', 'start_lidx', 'stop_ridx', 'size_max':
        del df_roh[col]
    df_roh.rename(columns={'start_ridx': 'start',
                           'stop_lidx': 'stop',
                           'size_min': 'length'}, inplace=True)

    # make coordinates 1-based
    df_roh['start'] = df_roh['start'] + 1
    df_roh['stop'] = df_roh['stop'] + 1

    # filter by ROH size
    if min_roh > 0:
        df_roh = df_roh[df_roh.length >= min_roh]

    # compute FROH
    froh = df_roh.length.sum() / contig_size

    return df_roh, froh
python
def roh_mhmm(gv, pos, phet_roh=0.001, phet_nonroh=(0.0025, 0.01),
             transition=1e-6, min_roh=0, is_accessible=None,
             contig_size=None):
    """Call ROH (runs of homozygosity) in a single individual given a
    genotype vector.

    This function computes the likely ROH using a Multinomial HMM model.
    There are 3 observable states at each position in a chromosome/contig:
    0 = Hom, 1 = Het, 2 = inaccessible (i.e., unobserved).

    The model is provided with a probability of observing a het in a ROH
    (`phet_roh`) and one or more probabilities of observing a het in a
    non-ROH, as this probability may not be constant across the genome
    (`phet_nonroh`).

    Parameters
    ----------
    gv : array_like, int, shape (n_variants, ploidy)
        Genotype vector.
    pos: array_like, int, shape (n_variants,)
        Positions of variants, same 0th dimension as `gv`.
    phet_roh: float, optional
        Probability of observing a heterozygote in a ROH. Appropriate
        values will depend on de novo mutation rate and genotype error rate.
    phet_nonroh: tuple of floats, optional
        One or more probabilities of observing a heterozygote outside of
        ROH. Appropriate values will depend primarily on nucleotide
        diversity within the population, but also on mutation rate and
        genotype error rate.
    transition: float, optional
        Probability of moving between states.
    min_roh: integer, optional
        Minimum size (bp) to consider as a ROH. Will depend on contig size
        and recombination rate.
    is_accessible: array_like, bool, shape (`contig_size`,), optional
        Boolean array for each position in contig describing whether
        accessible or not.
    contig_size: int, optional
        If `is_accessible` is not known/not provided, allows specification
        of total length of contig.

    Returns
    -------
    df_roh: DataFrame
        Data frame where each row describes a run of homozygosity. Columns
        are 'start', 'stop', 'length' and 'is_marginal'. Start and stop are
        1-based, stop-inclusive.
    froh: float
        Proportion of genome in a ROH.

    Notes
    -----
    This function requires `hmmlearn
    <http://hmmlearn.readthedocs.io/en/latest/>`_ to be installed.

    This function currently requires around 4GB memory for a contig size
    of ~50Mbp.

    """

    from hmmlearn import hmm

    # setup inputs
    if isinstance(phet_nonroh, float):
        phet_nonroh = phet_nonroh,
    gv = GenotypeVector(gv)
    pos = asarray_ndim(pos, 1)
    check_dim0_aligned(gv, pos)
    is_accessible = asarray_ndim(is_accessible, 1, dtype=bool)

    # heterozygote probabilities
    het_px = np.concatenate([(phet_roh,), phet_nonroh])

    # start probabilities (all equal)
    start_prob = np.repeat(1/het_px.size, het_px.size)

    # transition between underlying states
    transition_mx = _hmm_derive_transition_matrix(transition, het_px.size)

    # probability of inaccessible
    if is_accessible is None:
        if contig_size is None:
            raise ValueError(
                "If is_accessible argument is not provided, you must "
                "provide contig_size")
        p_accessible = 1.0
    else:
        p_accessible = is_accessible.mean()
        contig_size = is_accessible.size

    emission_mx = _mhmm_derive_emission_matrix(het_px, p_accessible)

    # initialize HMM
    roh_hmm = hmm.MultinomialHMM(n_components=het_px.size)
    roh_hmm.n_symbols_ = 3
    roh_hmm.startprob_ = start_prob
    roh_hmm.transmat_ = transition_mx
    roh_hmm.emissionprob_ = emission_mx

    # locate heterozygous calls
    is_het = gv.is_het()

    # predict ROH state
    pred, obs = _mhmm_predict_roh_state(roh_hmm, is_het, pos,
                                        is_accessible, contig_size)

    # find ROH windows
    df_blocks = tabulate_state_blocks(pred, states=list(range(len(het_px))))
    df_roh = df_blocks[(df_blocks.state == 0)].reset_index(drop=True)

    # adapt the dataframe for ROH
    for col in 'state', 'support', 'start_lidx', 'stop_ridx', 'size_max':
        del df_roh[col]
    df_roh.rename(columns={'start_ridx': 'start',
                           'stop_lidx': 'stop',
                           'size_min': 'length'}, inplace=True)

    # make coordinates 1-based
    df_roh['start'] = df_roh['start'] + 1
    df_roh['stop'] = df_roh['stop'] + 1

    # filter by ROH size
    if min_roh > 0:
        df_roh = df_roh[df_roh.length >= min_roh]

    # compute FROH
    froh = df_roh.length.sum() / contig_size

    return df_roh, froh
Call ROH (runs of homozygosity) in a single individual given a genotype vector.

This function computes the likely ROH using a Multinomial HMM model. There are
3 observable states at each position in a chromosome/contig: 0 = Hom, 1 = Het,
2 = inaccessible (i.e., unobserved). The model is provided with a probability
of observing a het in a ROH (`phet_roh`) and one or more probabilities of
observing a het in a non-ROH, as this probability may not be constant across
the genome (`phet_nonroh`).

Parameters
----------
gv : array_like, int, shape (n_variants, ploidy)
    Genotype vector.
pos : array_like, int, shape (n_variants,)
    Positions of variants, same 0th dimension as `gv`.
phet_roh : float, optional
    Probability of observing a heterozygote in a ROH. Appropriate values
    will depend on de novo mutation rate and genotype error rate.
phet_nonroh : tuple of floats, optional
    One or more probabilities of observing a heterozygote outside of ROH.
    Appropriate values will depend primarily on nucleotide diversity within
    the population, but also on mutation rate and genotype error rate.
transition : float, optional
    Probability of moving between states.
min_roh : integer, optional
    Minimum size (bp) to consider as a ROH. Will depend on contig size and
    recombination rate.
is_accessible : array_like, bool, shape (`contig_size`,), optional
    Boolean array for each position in contig describing whether accessible
    or not.
contig_size : int, optional
    If is_accessible is not known/not provided, allows specification of the
    total length of the contig.

Returns
-------
df_roh : DataFrame
    Data frame where each row describes a run of homozygosity. Columns are
    'start', 'stop', 'length' and 'is_marginal'. Start and stop are 1-based,
    stop-inclusive.
froh : float
    Proportion of genome in a ROH.

Notes
-----
This function requires `hmmlearn <http://hmmlearn.readthedocs.io/en/latest/>`_
to be installed.

This function currently requires around 4GB memory for a contig size of
~50Mbp.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/roh.py#L12-L132
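As a usage illustration, a minimal sketch follows. The genotype vector, positions and accessibility mask are toy values invented for the example; `hmmlearn` must be installed (a version whose MultinomialHMM is compatible with this code), and `roh_mhmm` is assumed to be re-exported from the top-level `allel` namespace as in recent releases:

import numpy as np
import allel

# toy data: one diploid individual at five variants on a 100 kb contig
gv = np.array([[0, 0], [0, 1], [0, 0], [0, 0], [1, 1]])
pos = np.array([100, 2000, 35000, 70000, 99000])
is_accessible = np.ones(100000, dtype=bool)  # pretend every base is accessible

df_roh, froh = allel.roh_mhmm(gv, pos, is_accessible=is_accessible)
print(df_roh)  # one row per called ROH: start, stop, length, is_marginal
print(froh)    # fraction of the contig called as ROH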
cggh/scikit-allel
allel/stats/roh.py
roh_poissonhmm
def roh_poissonhmm(gv, pos, phet_roh=0.001, phet_nonroh=(0.0025, 0.01),
                   transition=1e-3, window_size=1000, min_roh=0,
                   is_accessible=None, contig_size=None):
    """Call ROH (runs of homozygosity) in a single individual given a
    genotype vector.

    This function computes the likely ROH using a Poisson HMM model. The
    chromosome is divided into equally accessible windows of specified size,
    then the number of hets observed in each is used to fit a Poisson HMM.
    Note this is much faster than `roh_mhmm`, but at the cost of some
    resolution.

    The model is provided with a probability of observing a het in a ROH
    (`phet_roh`) and one or more probabilities of observing a het in a
    non-ROH, as this probability may not be constant across the genome
    (`phet_nonroh`).

    Parameters
    ----------
    gv : array_like, int, shape (n_variants, ploidy)
        Genotype vector.
    pos : array_like, int, shape (n_variants,)
        Positions of variants, same 0th dimension as `gv`.
    phet_roh : float, optional
        Probability of observing a heterozygote in a ROH. Appropriate values
        will depend on de novo mutation rate and genotype error rate.
    phet_nonroh : tuple of floats, optional
        One or more probabilities of observing a heterozygote outside of
        ROH. Appropriate values will depend primarily on nucleotide
        diversity within the population, but also on mutation rate and
        genotype error rate.
    transition : float, optional
        Probability of moving between states. This is based on windows, so a
        larger window size may call for a larger transition probability.
    window_size : integer, optional
        Window size (equally accessible bases) to consider as a potential
        ROH. Setting this window too small may result in spurious ROH calls,
        while too large will result in a lack of resolution.
    min_roh : integer, optional
        Minimum size (bp) to consider as a ROH. Will depend on contig size
        and recombination rate.
    is_accessible : array_like, bool, shape (`contig_size`,), optional
        Boolean array for each position in contig describing whether
        accessible or not. Although optional, highly recommended so
        invariant sites are distinguishable from sites where variation is
        inaccessible.
    contig_size : integer, optional
        If is_accessible is not available, use this to specify the size of
        the contig, and assume all sites are accessible.

    Returns
    -------
    df_roh : DataFrame
        Data frame where each row describes a run of homozygosity. Columns
        are 'start', 'stop', 'length' and 'is_marginal'. Start and stop are
        1-based, stop-inclusive.
    froh : float
        Proportion of genome in a ROH.

    Notes
    -----
    This function requires `pomegranate` (>= 0.9.0) to be installed.

    """
    from pomegranate import HiddenMarkovModel, PoissonDistribution

    # equally accessible windows
    if is_accessible is None:
        if contig_size is None:
            raise ValueError(
                "If is_accessible argument is not provided, you must provide contig_size")
        is_accessible = np.ones((contig_size,), dtype="bool")
    else:
        contig_size = is_accessible.size

    eqw = equally_accessible_windows(is_accessible, window_size)
    ishet = GenotypeVector(gv).is_het()
    counts, wins, records = windowed_statistic(pos, ishet, np.sum, windows=eqw)

    # heterozygote probabilities
    het_px = np.concatenate([(phet_roh,), phet_nonroh])

    # start probabilities (all equal)
    start_prob = np.repeat(1/het_px.size, het_px.size)

    # transition between underlying states
    transition_mx = _hmm_derive_transition_matrix(transition, het_px.size)

    dists = [PoissonDistribution(x * window_size) for x in het_px]

    model = HiddenMarkovModel.from_matrix(transition_probabilities=transition_mx,
                                          distributions=dists,
                                          starts=start_prob)

    prediction = np.array(model.predict(counts[:, None]))

    df_blocks = tabulate_state_blocks(prediction, states=list(range(len(het_px))))
    df_roh = df_blocks[(df_blocks.state == 0)].reset_index(drop=True)

    # adapt the dataframe for ROH
    df_roh["start"] = df_roh.start_ridx.apply(lambda y: eqw[y, 0])
    df_roh["stop"] = df_roh.stop_lidx.apply(lambda y: eqw[y, 1])
    df_roh["length"] = df_roh.stop - df_roh.start

    # filter by ROH size
    if min_roh > 0:
        df_roh = df_roh[df_roh.length >= min_roh]

    # compute FROH
    froh = df_roh.length.sum() / contig_size

    return df_roh[["start", "stop", "length", "is_marginal"]], froh
python
Call ROH (runs of homozygosity) in a single individual given a genotype vector.

This function computes the likely ROH using a Poisson HMM model. The
chromosome is divided into equally accessible windows of specified size, then
the number of hets observed in each is used to fit a Poisson HMM. Note this is
much faster than `roh_mhmm`, but at the cost of some resolution.

The model is provided with a probability of observing a het in a ROH
(`phet_roh`) and one or more probabilities of observing a het in a non-ROH, as
this probability may not be constant across the genome (`phet_nonroh`).

Parameters
----------
gv : array_like, int, shape (n_variants, ploidy)
    Genotype vector.
pos : array_like, int, shape (n_variants,)
    Positions of variants, same 0th dimension as `gv`.
phet_roh : float, optional
    Probability of observing a heterozygote in a ROH. Appropriate values
    will depend on de novo mutation rate and genotype error rate.
phet_nonroh : tuple of floats, optional
    One or more probabilities of observing a heterozygote outside of ROH.
    Appropriate values will depend primarily on nucleotide diversity within
    the population, but also on mutation rate and genotype error rate.
transition : float, optional
    Probability of moving between states. This is based on windows, so a
    larger window size may call for a larger transition probability.
window_size : integer, optional
    Window size (equally accessible bases) to consider as a potential ROH.
    Setting this window too small may result in spurious ROH calls, while
    too large will result in a lack of resolution.
min_roh : integer, optional
    Minimum size (bp) to consider as a ROH. Will depend on contig size and
    recombination rate.
is_accessible : array_like, bool, shape (`contig_size`,), optional
    Boolean array for each position in contig describing whether accessible
    or not. Although optional, highly recommended so invariant sites are
    distinguishable from sites where variation is inaccessible.
contig_size : integer, optional
    If is_accessible is not available, use this to specify the size of the
    contig, and assume all sites are accessible.

Returns
-------
df_roh : DataFrame
    Data frame where each row describes a run of homozygosity. Columns are
    'start', 'stop', 'length' and 'is_marginal'. Start and stop are 1-based,
    stop-inclusive.
froh : float
    Proportion of genome in a ROH.

Notes
-----
This function requires `pomegranate` (>= 0.9.0) to be installed.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/roh.py#L151-L258
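A parallel sketch for the windowed variant, again with invented toy data. This one needs `pomegranate` (>= 0.9.0, and a pre-1.0 release where `HiddenMarkovModel.from_matrix` still exists) and assumes a top-level `allel.roh_poissonhmm` export:

import numpy as np
import allel

gv = np.array([[0, 0], [0, 1], [0, 0], [0, 0], [1, 1]])
pos = np.array([100, 2000, 35000, 70000, 99000])

# no accessibility mask here, so pass the contig size and assume all accessible
df_roh, froh = allel.roh_poissonhmm(gv, pos, window_size=1000, contig_size=100000)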
cggh/scikit-allel
allel/stats/sf.py
sfs
def sfs(dac, n=None):
    """Compute the site frequency spectrum given derived allele counts at
    a set of biallelic variants.

    Parameters
    ----------
    dac : array_like, int, shape (n_variants,)
        Array of derived allele counts.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs : ndarray, int, shape (n_chromosomes,)
        Array where the kth element is the number of variant sites with k
        derived alleles.

    """
    # check input
    dac, n = _check_dac_n(dac, n)

    # need platform integer for bincount
    dac = dac.astype(int, copy=False)

    # compute site frequency spectrum
    x = n + 1
    s = np.bincount(dac, minlength=x)

    return s
python
Compute the site frequency spectrum given derived allele counts at a set of
biallelic variants.

Parameters
----------
dac : array_like, int, shape (n_variants,)
    Array of derived allele counts.
n : int, optional
    The total number of chromosomes called.

Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
    Array where the kth element is the number of variant sites with k
    derived alleles.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L37-L66
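To make the bincount logic concrete, here is a tiny worked example (output values computed by hand from the code above; `allel.sfs` is assumed to be the top-level export of this function):

import numpy as np
import allel

# derived allele counts at six biallelic sites, n = 2 chromosomes
dac = np.array([0, 1, 1, 2, 2, 2])
print(allel.sfs(dac, n=2))  # [1 2 3]: one invariant site, two singletons,
                            # three sites fixed for the derived allele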
cggh/scikit-allel
allel/stats/sf.py
sfs_folded
def sfs_folded(ac, n=None):
    """Compute the folded site frequency spectrum given reference and
    alternate allele counts at a set of biallelic variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int, shape (n_chromosomes//2,)
        Array where the kth element is the number of variant sites with a
        minor allele count of k.

    """
    # check input
    ac, n = _check_ac_n(ac, n)

    # compute minor allele counts
    mac = np.amin(ac, axis=1)

    # need platform integer for bincount
    mac = mac.astype(int, copy=False)

    # compute folded site frequency spectrum
    x = n//2 + 1
    s = np.bincount(mac, minlength=x)

    return s
python
Compute the folded site frequency spectrum given reference and alternate
allele counts at a set of biallelic variants.

Parameters
----------
ac : array_like, int, shape (n_variants, 2)
    Allele counts array.
n : int, optional
    The total number of chromosomes called.

Returns
-------
sfs_folded : ndarray, int, shape (n_chromosomes//2,)
    Array where the kth element is the number of variant sites with a minor
    allele count of k.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L69-L101
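The folded version counts whichever allele is rarer at each site; a hand-checkable example with made-up allele counts:

import numpy as np
import allel

# ref/alt allele counts at four sites, n = 4 chromosomes
ac = np.array([[4, 0], [3, 1], [2, 2], [1, 3]])
print(allel.sfs_folded(ac, n=4))  # [1 2 1]: minor allele counts 0, 1, 2, 1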
cggh/scikit-allel
allel/stats/sf.py
sfs_scaled
def sfs_scaled(dac, n=None):
    """Compute the site frequency spectrum scaled such that a constant value
    is expected across the spectrum for neutral variation and constant
    population size.

    Parameters
    ----------
    dac : array_like, int, shape (n_variants,)
        Array of derived allele counts.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        An array where the value of the kth element is the number of
        variants with k derived alleles, multiplied by k.

    """
    # compute site frequency spectrum
    s = sfs(dac, n=n)

    # apply scaling
    s = scale_sfs(s)

    return s
python
Compute the site frequency spectrum scaled such that a constant value is
expected across the spectrum for neutral variation and constant population
size.

Parameters
----------
dac : array_like, int, shape (n_variants,)
    Array of derived allele counts.
n : int, optional
    The total number of chromosomes called.

Returns
-------
sfs_scaled : ndarray, int, shape (n_chromosomes,)
    An array where the value of the kth element is the number of variants
    with k derived alleles, multiplied by k.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L104-L130
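The scaling is simply elementwise multiplication by the allele count, so the relation s_scaled[k] = k * s[k] can be checked directly (same toy data as above, assuming top-level exports):

import numpy as np
import allel

dac = np.array([0, 1, 1, 2, 2, 2])
print(allel.sfs(dac, n=2))         # [1 2 3]
print(allel.sfs_scaled(dac, n=2))  # [0 2 6], i.e. k * s[k] at each k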
cggh/scikit-allel
allel/stats/sf.py
scale_sfs
def scale_sfs(s):
    """Scale a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.

    Returns
    -------
    sfs_scaled : ndarray, int, shape (n_chromosomes,)
        Scaled site frequency spectrum.

    """
    k = np.arange(s.size)
    out = s * k
    return out
python
Scale a site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes,)
    Site frequency spectrum.

Returns
-------
sfs_scaled : ndarray, int, shape (n_chromosomes,)
    Scaled site frequency spectrum.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L133-L149
cggh/scikit-allel
allel/stats/sf.py
sfs_folded_scaled
def sfs_folded_scaled(ac, n=None):
    """Compute the folded site frequency spectrum scaled such that a
    constant value is expected across the spectrum for neutral variation and
    constant population size.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        An array where the value of the kth element is the number of
        variants with minor allele count k, multiplied by the scaling factor
        (k * (n - k) / n).

    """
    # check input
    ac, n = _check_ac_n(ac, n)

    # compute the site frequency spectrum
    s = sfs_folded(ac, n=n)

    # apply scaling
    s = scale_sfs_folded(s, n)

    return s
python
Compute the folded site frequency spectrum scaled such that a constant value
is expected across the spectrum for neutral variation and constant population
size.

Parameters
----------
ac : array_like, int, shape (n_variants, 2)
    Allele counts array.
n : int, optional
    The total number of chromosomes called.

Returns
-------
sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
    An array where the value of the kth element is the number of variants
    with minor allele count k, multiplied by the scaling factor
    (k * (n - k) / n).
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L152-L182
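Here the expected-constant scaling uses the factor k * (n - k) / n rather than k; a quick check with the same toy allele counts as earlier (values computed by hand, assuming top-level exports):

import numpy as np
import allel

ac = np.array([[4, 0], [3, 1], [2, 2], [1, 3]])
print(allel.sfs_folded(ac, n=4))         # [1 2 1]
print(allel.sfs_folded_scaled(ac, n=4))  # [0.  1.5 1. ], i.e. s[k] * k * (n - k) / n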
cggh/scikit-allel
allel/stats/sf.py
scale_sfs_folded
def scale_sfs_folded(s, n):
    """Scale a folded site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes//2,)
        Folded site frequency spectrum.
    n : int
        Number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        Scaled folded site frequency spectrum.

    """
    k = np.arange(s.shape[0])
    out = s * k * (n - k) / n
    return out
python
Scale a folded site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes//2,)
    Folded site frequency spectrum.
n : int
    Number of chromosomes called.

Returns
-------
sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
    Scaled folded site frequency spectrum.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L185-L203
cggh/scikit-allel
allel/stats/sf.py
joint_sfs
def joint_sfs(dac1, dac2, n1=None, n2=None):
    """Compute the joint site frequency spectrum between two populations.

    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
        Array where the (i, j)th element is the number of variant sites with
        i derived alleles in the first population and j derived alleles in
        the second population.

    """
    # check inputs
    dac1, n1 = _check_dac_n(dac1, n1)
    dac2, n2 = _check_dac_n(dac2, n2)

    # compute site frequency spectrum
    x = n1 + 1
    y = n2 + 1

    # need platform integer for bincount
    tmp = (dac1 * y + dac2).astype(int, copy=False)
    s = np.bincount(tmp)
    s.resize(x, y)
    return s
python
Compute the joint site frequency spectrum between two populations.

Parameters
----------
dac1 : array_like, int, shape (n_variants,)
    Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
    Derived allele counts for the second population.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
    Array where the (i, j)th element is the number of variant sites with i
    derived alleles in the first population and j derived alleles in the
    second population.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L206-L238
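A hand-checkable example of the 2-D binning (the flattened index dac1 * (n2 + 1) + dac2 is what feeds np.bincount; toy data, assuming a top-level allel.joint_sfs export):

import numpy as np
import allel

dac1 = np.array([0, 1, 1])
dac2 = np.array([1, 0, 1])
print(allel.joint_sfs(dac1, dac2, n1=2, n2=2))
# [[0 1 0]
#  [1 1 0]
#  [0 0 0]]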
cggh/scikit-allel
allel/stats/sf.py
joint_sfs_folded
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):
    """Compute the joint folded site frequency spectrum between two
    populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, 2)
        Allele counts for the first population.
    ac2 : array_like, int, shape (n_variants, 2)
        Allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
        Array where the (i, j)th element is the number of variant sites with
        a minor allele count of i in the first population and j in the
        second population.

    """
    # check inputs
    ac1, n1 = _check_ac_n(ac1, n1)
    ac2, n2 = _check_ac_n(ac2, n2)

    # compute minor allele counts
    mac1 = np.amin(ac1, axis=1)
    mac2 = np.amin(ac2, axis=1)

    # compute site frequency spectrum
    x = n1//2 + 1
    y = n2//2 + 1
    tmp = (mac1 * y + mac2).astype(int, copy=False)
    s = np.bincount(tmp)
    s.resize(x, y)
    return s
python
Compute the joint folded site frequency spectrum between two populations.

Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
    Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
    Allele counts for the second population.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
    Array where the (i, j)th element is the number of variant sites with a
    minor allele count of i in the first population and j in the second
    population.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L241-L277
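The folded joint variant applies the same binning to minor allele counts; toy data, output computed by hand:

import numpy as np
import allel

ac1 = np.array([[2, 0], [1, 1], [0, 2]])
ac2 = np.array([[1, 1], [2, 0], [1, 1]])
print(allel.joint_sfs_folded(ac1, ac2, n1=2, n2=2))
# [[0 2]
#  [1 0]]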
cggh/scikit-allel
allel/stats/sf.py
joint_sfs_scaled
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None):
    """Compute the joint site frequency spectrum between two populations,
    scaled such that a constant value is expected across the spectrum for
    neutral variation, constant population size and unrelated populations.

    Parameters
    ----------
    dac1 : array_like, int, shape (n_variants,)
        Derived allele counts for the first population.
    dac2 : array_like, int, shape (n_variants,)
        Derived allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1)
        Array where the (i, j)th element is the scaled frequency of variant
        sites with i derived alleles in the first population and j derived
        alleles in the second population.

    """
    # compute site frequency spectrum
    s = joint_sfs(dac1, dac2, n1=n1, n2=n2)

    # apply scaling
    s = scale_joint_sfs(s)

    return s
python
Compute the joint site frequency spectrum between two populations, scaled
such that a constant value is expected across the spectrum for neutral
variation, constant population size and unrelated populations.

Parameters
----------
dac1 : array_like, int, shape (n_variants,)
    Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
    Derived allele counts for the second population.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1)
    Array where the (i, j)th element is the scaled frequency of variant
    sites with i derived alleles in the first population and j derived
    alleles in the second population.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L280-L309
cggh/scikit-allel
allel/stats/sf.py
scale_joint_sfs
def scale_joint_sfs(s):
    """Scale a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n1, n2)
        Joint site frequency spectrum.

    Returns
    -------
    joint_sfs_scaled : ndarray, int, shape (n1, n2)
        Scaled joint site frequency spectrum.

    """
    i = np.arange(s.shape[0])[:, None]
    j = np.arange(s.shape[1])[None, :]
    out = (s * i) * j
    return out
python
Scale a joint site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n1, n2)
    Joint site frequency spectrum.

Returns
-------
joint_sfs_scaled : ndarray, int, shape (n1, n2)
    Scaled joint site frequency spectrum.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L312-L330
cggh/scikit-allel
allel/stats/sf.py
joint_sfs_folded_scaled
def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None):
    """Compute the joint folded site frequency spectrum between two
    populations, scaled such that a constant value is expected across the
    spectrum for neutral variation, constant population size and unrelated
    populations.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, 2)
        Allele counts for the first population.
    ac2 : array_like, int, shape (n_variants, 2)
        Allele counts for the second population.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
        Array where the (i, j)th element is the scaled frequency of variant
        sites with a minor allele count of i in the first population and j
        in the second population.

    """  # noqa
    # check inputs
    ac1, n1 = _check_ac_n(ac1, n1)
    ac2, n2 = _check_ac_n(ac2, n2)

    # compute site frequency spectrum
    s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2)

    # apply scaling
    s = scale_joint_sfs_folded(s, n1, n2)

    return s
python
Compute the joint folded site frequency spectrum between two populations,
scaled such that a constant value is expected across the spectrum for neutral
variation, constant population size and unrelated populations.

Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
    Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
    Allele counts for the second population.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
    Array where the (i, j)th element is the scaled frequency of variant
    sites with a minor allele count of i in the first population and j in
    the second population.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L333-L367
cggh/scikit-allel
allel/stats/sf.py
scale_joint_sfs_folded
def scale_joint_sfs_folded(s, n1, n2):
    """Scale a folded joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
        Folded joint site frequency spectrum.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
        Scaled folded joint site frequency spectrum.

    """  # noqa
    out = np.empty_like(s)
    for i in range(s.shape[0]):
        for j in range(s.shape[1]):
            out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j)
    return out
python
Scale a folded joint site frequency spectrum.

Parameters
----------
s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
    Folded joint site frequency spectrum.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
    Scaled folded joint site frequency spectrum.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L370-L390
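The nested loops above are easy to read but not necessary; a broadcasting equivalent is sketched below. This is an illustration of the same arithmetic, not part of the library:

import numpy as np

def scale_joint_sfs_folded_vectorized(s, n1, n2):
    # index grids shaped (m, 1) and (1, n) so the product broadcasts to (m, n)
    i = np.arange(s.shape[0])[:, None]
    j = np.arange(s.shape[1])[None, :]
    # same elementwise factor as the loops: i * j * (n1 - i) * (n2 - j)
    return s * i * j * (n1 - i) * (n2 - j)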
cggh/scikit-allel
allel/stats/sf.py
fold_sfs
def fold_sfs(s, n):
    """Fold a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.
    n : int
        Total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int
        Folded site frequency spectrum.

    """
    # check inputs
    s = asarray_ndim(s, 1)
    assert s.shape[0] <= n + 1, 'invalid number of chromosomes'

    # need to check s has all entries up to n
    if s.shape[0] < n + 1:
        sn = np.zeros(n + 1, dtype=s.dtype)
        sn[:s.shape[0]] = s
        s = sn

    # fold
    nf = (n + 1) // 2
    n = nf * 2
    o = s[:nf] + s[nf:n][::-1]

    return o
python
Fold a site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes,)
    Site frequency spectrum.
n : int
    Total number of chromosomes called.

Returns
-------
sfs_folded : ndarray, int
    Folded site frequency spectrum.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L393-L425
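A quick worked fold with an odd number of chromosomes (values computed by hand from the slicing above; entries k and n - k are summed into the same minor-allele class):

import numpy as np
import allel

s = np.array([4, 3, 2, 1, 0, 1])  # unfolded spectrum, n = 5
print(allel.fold_sfs(s, n=5))     # [5 3 3]: pairs (0,5), (1,4), (2,3)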
cggh/scikit-allel
allel/stats/sf.py
fold_joint_sfs
def fold_joint_sfs(s, n1, n2):
    """Fold a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (m_chromosomes, n_chromosomes)
        Joint site frequency spectrum.
    n1, n2 : int, optional
        The total number of chromosomes called in each population.

    Returns
    -------
    joint_sfs_folded : ndarray, int
        Folded joint site frequency spectrum.

    """
    # check inputs
    s = asarray_ndim(s, 2)
    assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
    assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'

    # need to check s has all entries up to m
    if s.shape[0] < n1 + 1:
        sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
        sm[:s.shape[0]] = s
        s = sm

    # need to check s has all entries up to n
    if s.shape[1] < n2 + 1:
        sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
        sn[:, :s.shape[1]] = s
        s = sn

    # fold
    mf = (n1 + 1) // 2
    nf = (n2 + 1) // 2
    n1 = mf * 2
    n2 = nf * 2
    o = (s[:mf, :nf] +  # top left
         s[mf:n1, :nf][::-1] +  # top right
         s[:mf, nf:n2][:, ::-1] +  # bottom left
         s[mf:n1, nf:n2][::-1, ::-1])  # bottom right

    return o
python
Fold a joint site frequency spectrum.

Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
    Joint site frequency spectrum.
n1, n2 : int, optional
    The total number of chromosomes called in each population.

Returns
-------
joint_sfs_folded : ndarray, int
    Folded joint site frequency spectrum.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L428-L472
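The 2-D fold sums the four quadrants of the spectrum; the smallest possible case makes this visible (hand-computed, assuming a top-level export):

import numpy as np
import allel

s = np.array([[1, 2],
              [3, 4]])                      # joint spectrum, n1 = n2 = 1
print(allel.fold_joint_sfs(s, n1=1, n2=1))  # [[10]]: all four cells summed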
cggh/scikit-allel
allel/stats/sf.py
plot_sfs
def plot_sfs(s, yscale='log', bins=None, n=None,
             clip_endpoints=True, label=None, plot_kwargs=None,
             ax=None):
    """Plot a site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    import matplotlib.pyplot as plt
    import scipy

    # check inputs
    s = asarray_ndim(s, 1)

    # setup axes
    if ax is None:
        fig, ax = plt.subplots()

    # setup data
    if bins is None:
        if clip_endpoints:
            x = np.arange(1, s.shape[0]-1)
            y = s[1:-1]
        else:
            x = np.arange(s.shape[0])
            y = s
    else:
        if clip_endpoints:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(1, s.shape[0]-1),
                values=s[1:-1],
                bins=bins,
                statistic='sum')
        else:
            y, b, _ = scipy.stats.binned_statistic(
                np.arange(s.shape[0]),
                values=s,
                bins=bins,
                statistic='sum')
        # use bin midpoints for plotting
        x = (b[:-1] + b[1:]) / 2

    if n:
        # convert allele counts to allele frequencies
        x = x / n
        ax.set_xlabel('derived allele frequency')
    else:
        ax.set_xlabel('derived allele count')

    # do plotting
    if plot_kwargs is None:
        plot_kwargs = dict()
    ax.plot(x, y, label=label, **plot_kwargs)

    # tidy
    ax.set_yscale(yscale)
    ax.set_ylabel('site frequency')
    ax.autoscale(axis='x', tight=True)

    return ax
python
Plot a site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes,)
    Site frequency spectrum.
yscale : string, optional
    Y axis scale.
bins : int or array_like, int, optional
    Allele count bins.
n : int, optional
    Number of chromosomes sampled. If provided, X axis will be plotted as
    allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
    If True, do not plot first and last values from frequency spectrum.
label : string, optional
    Label for data series in plot.
plot_kwargs : dict-like
    Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
    Axes on which to draw. If not provided, a new figure will be created.

Returns
-------
ax : axes
    The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L475-L558
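A minimal plotting sketch with synthetic counts; matplotlib is required (and scipy if `bins` is used), and the data here are random rather than meaningful:

import numpy as np
import allel
import matplotlib.pyplot as plt

dac = np.random.randint(0, 11, size=1000)  # fake derived allele counts, n = 10
s = allel.sfs(dac, n=10)
ax = allel.plot_sfs(s, n=10, label='toy data')
ax.legend()
plt.show()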
cggh/scikit-allel
allel/stats/sf.py
plot_sfs_folded
def plot_sfs_folded(*args, **kwargs):
    """Plot a folded site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes/2,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    ax = plot_sfs(*args, **kwargs)
    n = kwargs.get('n', None)
    if n:
        ax.set_xlabel('minor allele frequency')
    else:
        ax.set_xlabel('minor allele count')
    return ax
python
Plot a folded site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
    Site frequency spectrum.
yscale : string, optional
    Y axis scale.
bins : int or array_like, int, optional
    Allele count bins.
n : int, optional
    Number of chromosomes sampled. If provided, X axis will be plotted as
    allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
    If True, do not plot first and last values from frequency spectrum.
label : string, optional
    Label for data series in plot.
plot_kwargs : dict-like
    Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
    Axes on which to draw. If not provided, a new figure will be created.

Returns
-------
ax : axes
    The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L562-L598
cggh/scikit-allel
allel/stats/sf.py
plot_sfs_scaled
def plot_sfs_scaled(*args, **kwargs):
    """Plot a scaled site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    kwargs.setdefault('yscale', 'linear')
    ax = plot_sfs(*args, **kwargs)
    ax.set_ylabel('scaled site frequency')
    return ax
python
Plot a scaled site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes,)
    Site frequency spectrum.
yscale : string, optional
    Y axis scale.
bins : int or array_like, int, optional
    Allele count bins.
n : int, optional
    Number of chromosomes sampled. If provided, X axis will be plotted as
    allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
    If True, do not plot first and last values from frequency spectrum.
label : string, optional
    Label for data series in plot.
plot_kwargs : dict-like
    Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
    Axes on which to draw. If not provided, a new figure will be created.

Returns
-------
ax : axes
    The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L602-L634
cggh/scikit-allel
allel/stats/sf.py
plot_sfs_folded_scaled
def plot_sfs_folded_scaled(*args, **kwargs):
    """Plot a folded scaled site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes/2,)
        Site frequency spectrum.
    yscale : string, optional
        Y axis scale.
    bins : int or array_like, int, optional
        Allele count bins.
    n : int, optional
        Number of chromosomes sampled. If provided, X axis will be plotted
        as allele frequency, otherwise as allele count.
    clip_endpoints : bool, optional
        If True, do not plot first and last values from frequency spectrum.
    label : string, optional
        Label for data series in plot.
    plot_kwargs : dict-like
        Additional keyword arguments, passed through to ax.plot().
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be
        created.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    kwargs.setdefault('yscale', 'linear')
    ax = plot_sfs_folded(*args, **kwargs)
    ax.set_ylabel('scaled site frequency')
    n = kwargs.get('n', None)
    if n:
        ax.set_xlabel('minor allele frequency')
    else:
        ax.set_xlabel('minor allele count')
    return ax
python
Plot a folded scaled site frequency spectrum.

Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
    Site frequency spectrum.
yscale : string, optional
    Y axis scale.
bins : int or array_like, int, optional
    Allele count bins.
n : int, optional
    Number of chromosomes sampled. If provided, X axis will be plotted as
    allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
    If True, do not plot first and last values from frequency spectrum.
label : string, optional
    Label for data series in plot.
plot_kwargs : dict-like
    Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
    Axes on which to draw. If not provided, a new figure will be created.

Returns
-------
ax : axes
    The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L638-L675
cggh/scikit-allel
allel/stats/sf.py
plot_joint_sfs
def plot_joint_sfs(s, ax=None, imshow_kwargs=None):
    """Plot a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm

    # check inputs
    s = asarray_ndim(s, 2)

    # setup axes
    if ax is None:
        w = plt.rcParams['figure.figsize'][0]
        fig, ax = plt.subplots(figsize=(w, w))

    # set plotting defaults
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('cmap', 'jet')
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('aspect', 'auto')
    imshow_kwargs.setdefault('norm', LogNorm())

    # plot data
    ax.imshow(s.T, **imshow_kwargs)

    # tidy
    ax.invert_yaxis()
    ax.set_xlabel('derived allele count (population 1)')
    ax.set_ylabel('derived allele count (population 2)')

    return ax
python
Plot a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L678-L724
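A usage sketch under hypothetical data: split one simulated genotype array into two arbitrary sample groups, derive per-population allele counts, and plot the joint spectrum. Note the default LogNorm means empty cells are simply left unfilled.

import numpy as np
import allel

rng = np.random.default_rng(1)
g = allel.GenotypeArray(rng.integers(0, 2, size=(5000, 20, 2)).astype('i1'))
ac1 = g.count_alleles(subpop=list(range(10)))      # population 1: first 10 samples
ac2 = g.count_alleles(subpop=list(range(10, 20)))  # population 2: last 10 samples
jsfs = allel.joint_sfs(ac1[:, 1], ac2[:, 1])       # derived allele counts per pop
ax = allel.plot_joint_sfs(jsfs)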
cggh/scikit-allel
allel/stats/sf.py
plot_joint_sfs_folded
def plot_joint_sfs_folded(*args, **kwargs):
    """Plot a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    ax = plot_joint_sfs(*args, **kwargs)
    ax.set_xlabel('minor allele count (population 1)')
    ax.set_ylabel('minor allele count (population 2)')
    return ax
python
Plot a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L728-L749
cggh/scikit-allel
allel/stats/sf.py
plot_joint_sfs_scaled
def plot_joint_sfs_scaled(*args, **kwargs):
    """Plot a scaled joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    imshow_kwargs = kwargs.get('imshow_kwargs', dict())
    imshow_kwargs.setdefault('norm', None)
    kwargs['imshow_kwargs'] = imshow_kwargs
    ax = plot_joint_sfs(*args, **kwargs)
    return ax
python
Plot a scaled joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L753-L775
cggh/scikit-allel
allel/stats/sf.py
plot_joint_sfs_folded_scaled
def plot_joint_sfs_folded_scaled(*args, **kwargs):
    """Plot a scaled folded joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.

    """
    imshow_kwargs = kwargs.get('imshow_kwargs', dict())
    imshow_kwargs.setdefault('norm', None)
    kwargs['imshow_kwargs'] = imshow_kwargs
    ax = plot_joint_sfs_folded(*args, **kwargs)
    ax.set_xlabel('minor allele count (population 1)')
    ax.set_ylabel('minor allele count (population 2)')
    return ax
python
Plot a scaled folded joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L779-L803
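The folded and scaled wrappers above only adjust the axis labels and the imshow norm; the plotting itself is delegated to plot_joint_sfs. A sketch covering the folded variants, under the same hypothetical simulated data as before:

import numpy as np
import allel

rng = np.random.default_rng(2)
g = allel.GenotypeArray(rng.integers(0, 2, size=(5000, 20, 2)).astype('i1'))
ac1 = g.count_alleles(subpop=list(range(10)))
ac2 = g.count_alleles(subpop=list(range(10, 20)))
jsfs_folded = allel.joint_sfs_folded(ac1, ac2)  # minor allele counts in both pops
ax = allel.plot_joint_sfs_folded_scaled(jsfs_folded)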
cggh/scikit-allel
allel/compat.py
memoryview_safe
def memoryview_safe(x):
    """Make array safe to run in a Cython memoryview-based kernel. These
    kernels typically break down with the error ``ValueError: buffer source
    array is read-only`` when running in dask distributed.

    See Also
    --------
    https://github.com/dask/distributed/issues/1978
    https://github.com/cggh/scikit-allel/issues/206

    """
    if not x.flags.writeable:
        if not x.flags.owndata:
            x = x.copy(order='A')
        x.setflags(write=True)
    return x
python
Make array safe to run in a Cython memoryview-based kernel. These
    kernels typically break down with the error ``ValueError: buffer source
    array is read-only`` when running in dask distributed.

    See Also
    --------
    https://github.com/dask/distributed/issues/1978
    https://github.com/cggh/scikit-allel/issues/206
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/compat.py#L51-L66
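An illustrative sketch: an array whose buffer has been marked read-only (as dask distributed may hand to a worker) becomes writeable before entering a kernel. Since the array here owns its data, no copy is made and the flag is simply flipped.

import numpy as np
from allel.compat import memoryview_safe

x = np.arange(10)
x.setflags(write=False)       # simulate a read-only buffer
y = memoryview_safe(x)
assert y.flags.writeable      # safe to pass into a Cython memoryview kernel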
cggh/scikit-allel
allel/io/vcf_read.py
_prep_fields_param
def _prep_fields_param(fields):
    """Prepare the `fields` parameter, and determine whether or not to
    store samples."""

    store_samples = False

    if fields is None:
        # add samples by default
        return True, None

    if isinstance(fields, str):
        fields = [fields]
    else:
        fields = list(fields)

    if 'samples' in fields:
        fields.remove('samples')
        store_samples = True
    elif '*' in fields:
        store_samples = True

    return store_samples, fields
python
Prepare the `fields` parameter, and determine whether or not to store samples.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L51-L71
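The helper's contract, sketched as assertions (importing the private helper purely for illustration): samples are stored when `fields` is None, contains 'samples', or contains the wildcard '*'.

from allel.io.vcf_read import _prep_fields_param  # private helper

assert _prep_fields_param(None) == (True, None)
assert _prep_fields_param('DP') == (False, ['DP'])
assert _prep_fields_param(['samples', 'DP']) == (True, ['DP'])
assert _prep_fields_param(['*']) == (True, ['*'])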
cggh/scikit-allel
allel/io/vcf_read.py
_chunk_iter_progress
def _chunk_iter_progress(it, log, prefix):
    """Wrap a chunk iterator for progress logging."""
    n_variants = 0
    before_all = time.time()
    before_chunk = before_all
    for chunk, chunk_length, chrom, pos in it:
        after_chunk = time.time()
        elapsed_chunk = after_chunk - before_chunk
        elapsed = after_chunk - before_all
        n_variants += chunk_length
        chrom = text_type(chrom, 'utf8')
        message = (
            '%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' %
            (prefix, n_variants, elapsed, elapsed_chunk,
             int(chunk_length // elapsed_chunk))
        )
        if chrom:
            message += '; %s:%s' % (chrom, pos)
        print(message, file=log)
        log.flush()
        yield chunk, chunk_length, chrom, pos
        before_chunk = after_chunk
    after_all = time.time()
    elapsed = after_all - before_all
    print('%s all done (%s rows/s)' %
          (prefix, int(n_variants // elapsed)), file=log)
    log.flush()
python
Wrap a chunk iterator for progress logging.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L74-L100
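A sketch of the wrapper in isolation, feeding it a fake chunk stream (the chunk contents and row counts are hypothetical). Note chrom is passed as bytes because the wrapper decodes it with utf8.

import sys
import time
from allel.io.vcf_read import _chunk_iter_progress  # private helper

def fake_chunks():
    # three fake chunks of 100 rows each
    for i in range(3):
        time.sleep(0.01)
        yield {'variants/POS': None}, 100, b'chr1', (i + 1) * 100

for chunk, n, chrom, pos in _chunk_iter_progress(fake_chunks(), sys.stderr,
                                                 prefix='[demo]'):
    pass  # a downstream consumer would process each chunk here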
cggh/scikit-allel
allel/io/vcf_read.py
read_vcf
def read_vcf(input,
             fields=None,
             exclude_fields=None,
             rename_fields=None,
             types=None,
             numbers=None,
             alt_number=DEFAULT_ALT_NUMBER,
             fills=None,
             region=None,
             tabix='tabix',
             samples=None,
             transformers=None,
             buffer_size=DEFAULT_BUFFER_SIZE,
             chunk_length=DEFAULT_CHUNK_LENGTH,
             log=None):
    """Read data from a VCF file into NumPy arrays.

    .. versionchanged:: 1.12.0
        Now returns None if no variants are found in the VCF file or
        matching the requested region.

    Parameters
    ----------
    input : string or file-like
        {input}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}

    Returns
    -------
    data : dict[str, ndarray]
        A dictionary holding arrays, or None if no variants were found.

    """

    # samples requested?
    # noinspection PyTypeChecker
    store_samples, fields = _prep_fields_param(fields)

    # setup
    fields, samples, headers, it = iter_vcf_chunks(
        input=input, fields=fields, exclude_fields=exclude_fields,
        types=types, numbers=numbers, alt_number=alt_number,
        buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
        region=region, tabix=tabix, samples=samples, transformers=transformers
    )

    # handle field renaming
    if rename_fields:
        rename_fields, it = _do_rename(it, fields=fields,
                                       rename_fields=rename_fields,
                                       headers=headers)

    # setup progress logging
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[read_vcf]')

    # read all chunks into a list
    chunks = [d[0] for d in it]

    if chunks:

        # setup output
        output = dict()

        if len(samples) > 0 and store_samples:
            output['samples'] = samples

        # find array keys
        keys = sorted(chunks[0].keys())

        # concatenate chunks
        for k in keys:
            output[k] = np.concatenate([chunk[k] for chunk in chunks], axis=0)

    else:
        output = None

    return output
python
Read data from a VCF file into NumPy arrays.

    .. versionchanged:: 1.12.0
        Now returns None if no variants are found in the VCF file or
        matching the requested region.

    Parameters
    ----------
    input : string or file-like
        {input}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}

    Returns
    -------
    data : dict[str, ndarray]
        A dictionary holding arrays, or None if no variants were found.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L240-L345
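A minimal usage sketch, assuming a file 'example.vcf' exists (hypothetical path): request a few fields, guard against the None return, and wrap genotype calls as a GenotypeArray.

import allel

callset = allel.read_vcf('example.vcf',
                         fields=['samples', 'variants/CHROM',
                                 'variants/POS', 'calldata/GT'])
if callset is not None:  # None when no variants were found (since 1.12.0)
    pos = callset['variants/POS']
    gt = allel.GenotypeArray(callset['calldata/GT'])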
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_npz
def vcf_to_npz(input, output,
               compressed=True,
               overwrite=False,
               fields=None,
               exclude_fields=None,
               rename_fields=None,
               types=None,
               numbers=None,
               alt_number=DEFAULT_ALT_NUMBER,
               fills=None,
               region=None,
               tabix=True,
               samples=None,
               transformers=None,
               buffer_size=DEFAULT_BUFFER_SIZE,
               chunk_length=DEFAULT_CHUNK_LENGTH,
               log=None):
    """Read data from a VCF file into NumPy arrays and save as a .npz file.

    .. versionchanged:: 1.12.0
        Now will not create any output file if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    compressed : bool, optional
        If True (default), save with compression.
    overwrite : bool, optional
        {overwrite}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}

    """

    # guard condition
    if not overwrite and os.path.exists(output):
        raise ValueError('file exists at path %r; use overwrite=True to '
                         'replace' % output)

    # read all data into memory
    data = read_vcf(
        input=input, fields=fields, exclude_fields=exclude_fields,
        rename_fields=rename_fields, types=types, numbers=numbers,
        alt_number=alt_number, buffer_size=buffer_size,
        chunk_length=chunk_length, log=log, fills=fills, region=region,
        tabix=tabix, samples=samples, transformers=transformers
    )

    if data is None:
        # no data, bail out
        return

    # setup save function
    if compressed:
        savez = np.savez_compressed
    else:
        savez = np.savez

    # save as npz
    savez(output, **data)
python
Read data from a VCF file into NumPy arrays and save as a .npz file.

    .. versionchanged:: 1.12.0
        Now will not create any output file if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    compressed : bool, optional
        If True (default), save with compression.
    overwrite : bool, optional
        {overwrite}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L375-L463
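A conversion sketch with hypothetical paths: convert once, then reload with NumPy. allow_pickle is needed because some fields are stored as object arrays.

import numpy as np
import allel

allel.vcf_to_npz('example.vcf', 'example.npz',  # hypothetical paths
                 fields='*', overwrite=True)
callset = np.load('example.npz', allow_pickle=True)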
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_hdf5
def vcf_to_hdf5(input, output,
                group='/',
                compression='gzip',
                compression_opts=1,
                shuffle=False,
                overwrite=False,
                vlen=True,
                fields=None,
                exclude_fields=None,
                rename_fields=None,
                types=None,
                numbers=None,
                alt_number=DEFAULT_ALT_NUMBER,
                fills=None,
                region=None,
                tabix='tabix',
                samples=None,
                transformers=None,
                buffer_size=DEFAULT_BUFFER_SIZE,
                chunk_length=DEFAULT_CHUNK_LENGTH,
                chunk_width=DEFAULT_CHUNK_WIDTH,
                log=None):
    """Read data from a VCF file and load into an HDF5 file.

    .. versionchanged:: 1.12.0
        Now will not create any output file if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    group : string
        Group within destination HDF5 file to store data in.
    compression : string
        Compression algorithm, e.g., 'gzip' (default).
    compression_opts : int
        Compression level, e.g., 1 (default).
    shuffle : bool
        Use byte shuffling, which may improve compression (default is False).
    overwrite : bool
        {overwrite}
    vlen : bool
        If True, store variable length strings. Note that there is
        considerable storage overhead for variable length strings in HDF5,
        and leaving this option as True (default) may lead to large file
        sizes. If False, all strings will be stored in the HDF5 file as
        fixed length strings, even if they are specified as 'object' type.
        In this case, the string length for any field with 'object' type
        will be determined based on the maximum length of strings found in
        the first chunk, and this may cause values to be truncated if
        longer values are found in later chunks. To avoid truncation and
        large file sizes, manually set the type for all string fields to an
        explicit fixed length string type, e.g., 'S10' for a field where
        you know at most 10 characters are required.
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    chunk_width : int, optional
        {chunk_width}
    log : file-like, optional
        {log}

    """

    import h5py

    # samples requested?
    # noinspection PyTypeChecker
    store_samples, fields = _prep_fields_param(fields)

    # setup chunk iterator
    fields, samples, headers, it = iter_vcf_chunks(
        input, fields=fields, exclude_fields=exclude_fields, types=types,
        numbers=numbers, alt_number=alt_number, buffer_size=buffer_size,
        chunk_length=chunk_length, fills=fills, region=region, tabix=tabix,
        samples=samples, transformers=transformers
    )

    # handle field renaming
    if rename_fields:
        rename_fields, it = _do_rename(it, fields=fields,
                                       rename_fields=rename_fields,
                                       headers=headers)

    # setup progress logging
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[vcf_to_hdf5]')

    # read first chunk
    try:
        chunk, _, _, _ = next(it)
    except StopIteration:
        # no data, bail out
        return

    with h5py.File(output, mode='a') as h5f:

        # obtain root group that data will be stored into
        root = h5f.require_group(group)

        if len(samples) > 0 and store_samples:
            # store samples
            name = 'samples'
            if name in root:
                if overwrite:
                    del root[name]
                else:
                    raise ValueError(
                        'dataset exists at path %r; use overwrite=True to '
                        'replace' % name)
            if samples.dtype.kind == 'O':
                if vlen:
                    t = h5py.special_dtype(vlen=str)
                else:
                    samples = samples.astype('S')
                    t = samples.dtype
            else:
                t = samples.dtype
            root.create_dataset(name, data=samples, chunks=None, dtype=t)

        # setup datasets
        # noinspection PyTypeChecker
        keys = _hdf5_setup_datasets(
            chunk=chunk, root=root, chunk_length=chunk_length,
            chunk_width=chunk_width, compression=compression,
            compression_opts=compression_opts, shuffle=shuffle,
            overwrite=overwrite, headers=headers, vlen=vlen
        )

        # store first chunk
        _hdf5_store_chunk(root, keys, chunk, vlen)

        # store remaining chunks
        for chunk, _, _, _ in it:
            _hdf5_store_chunk(root, keys, chunk, vlen)
python
Read data from a VCF file and load into an HDF5 file.

    .. versionchanged:: 1.12.0
        Now will not create any output file if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    group : string
        Group within destination HDF5 file to store data in.
    compression : string
        Compression algorithm, e.g., 'gzip' (default).
    compression_opts : int
        Compression level, e.g., 1 (default).
    shuffle : bool
        Use byte shuffling, which may improve compression (default is False).
    overwrite : bool
        {overwrite}
    vlen : bool
        If True, store variable length strings. Note that there is
        considerable storage overhead for variable length strings in HDF5,
        and leaving this option as True (default) may lead to large file
        sizes. If False, all strings will be stored in the HDF5 file as
        fixed length strings, even if they are specified as 'object' type.
        In this case, the string length for any field with 'object' type
        will be determined based on the maximum length of strings found in
        the first chunk, and this may cause values to be truncated if
        longer values are found in later chunks. To avoid truncation and
        large file sizes, manually set the type for all string fields to an
        explicit fixed length string type, e.g., 'S10' for a field where
        you know at most 10 characters are required.
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    chunk_width : int, optional
        {chunk_width}
    log : file-like, optional
        {log}
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L600-L757
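A conversion sketch with hypothetical paths; h5py is required. Data land under the requested group and can be read back lazily or sliced into memory.

import h5py
import allel

allel.vcf_to_hdf5('example.vcf', 'example.h5',  # hypothetical paths
                  fields='*', overwrite=True)
with h5py.File('example.h5', mode='r') as h5f:
    gt = h5f['calldata/GT'][:]  # slice the genotype dataset into memory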
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_zarr
def vcf_to_zarr(input, output,
                group='/',
                compressor='default',
                overwrite=False,
                fields=None,
                exclude_fields=None,
                rename_fields=None,
                types=None,
                numbers=None,
                alt_number=DEFAULT_ALT_NUMBER,
                fills=None,
                region=None,
                tabix='tabix',
                samples=None,
                transformers=None,
                buffer_size=DEFAULT_BUFFER_SIZE,
                chunk_length=DEFAULT_CHUNK_LENGTH,
                chunk_width=DEFAULT_CHUNK_WIDTH,
                log=None):
    """Read data from a VCF file and load into a Zarr on-disk store.

    .. versionchanged:: 1.12.0
        Now will not create any output files if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    group : string
        Group within destination Zarr hierarchy to store data in.
    compressor : compressor
        Compression algorithm, e.g., zarr.Blosc(cname='zstd', clevel=1,
        shuffle=1).
    overwrite : bool
        {overwrite}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    chunk_width : int, optional
        {chunk_width}
    log : file-like, optional
        {log}

    """

    import zarr

    # samples requested?
    # noinspection PyTypeChecker
    store_samples, fields = _prep_fields_param(fields)

    # setup chunk iterator
    fields, samples, headers, it = iter_vcf_chunks(
        input, fields=fields, exclude_fields=exclude_fields, types=types,
        numbers=numbers, alt_number=alt_number, buffer_size=buffer_size,
        chunk_length=chunk_length, fills=fills, region=region, tabix=tabix,
        samples=samples, transformers=transformers
    )

    # handle field renaming
    if rename_fields:
        rename_fields, it = _do_rename(it, fields=fields,
                                       rename_fields=rename_fields,
                                       headers=headers)

    # check for any case-insensitive duplicate fields
    # https://github.com/cggh/scikit-allel/issues/215
    ci_field_index = defaultdict(list)
    for f in fields:
        if rename_fields:
            f = rename_fields.get(f, f)
        ci_field_index[f.lower()].append(f)
    for k, v in ci_field_index.items():
        if len(v) > 1:
            msg = textwrap.fill(
                'Found two or more fields with the same name when compared '
                'case-insensitive: {!r}; this is not supported because it '
                'causes problems on platforms with a case-insensitive file '
                'system, which is usually the default on Windows and Mac OS. '
                'Please rename fields so they are distinct under a '
                'case-insensitive comparison via the rename_fields '
                'argument.'.format(v), width=80)
            raise ValueError(msg)

    # setup progress logging
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[vcf_to_zarr]')

    # read first chunk
    try:
        chunk, _, _, _ = next(it)
    except StopIteration:
        # no data, bail out
        return

    # open root group
    root = zarr.open_group(output, mode='a', path=group)

    if len(samples) > 0 and store_samples:
        # store samples
        if samples.dtype.kind == 'O':
            if PY2:
                dtype = 'unicode'
            else:
                dtype = 'str'
        else:
            dtype = samples.dtype
        root.create_dataset('samples', data=samples, compressor=None,
                            overwrite=overwrite, dtype=dtype)

    # setup datasets
    # noinspection PyTypeChecker
    keys = _zarr_setup_datasets(
        chunk, root=root, chunk_length=chunk_length, chunk_width=chunk_width,
        compressor=compressor, overwrite=overwrite, headers=headers
    )

    # store first chunk
    _zarr_store_chunk(root, keys, chunk)

    # store remaining chunks
    for chunk, _, _, _ in it:
        _zarr_store_chunk(root, keys, chunk)
python
Read data from a VCF file and load into a Zarr on-disk store.

    .. versionchanged:: 1.12.0
        Now will not create any output files if no variants are found in the
        VCF file or matching the requested region.

    Parameters
    ----------
    input : string
        {input}
    output : string
        {output}
    group : string
        Group within destination Zarr hierarchy to store data in.
    compressor : compressor
        Compression algorithm, e.g., zarr.Blosc(cname='zstd', clevel=1,
        shuffle=1).
    overwrite : bool
        {overwrite}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    rename_fields : dict[str -> str], optional
        {rename_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    chunk_width : int, optional
        {chunk_width}
    log : file-like, optional
        {log}
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L846-L993
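A conversion sketch with hypothetical paths, using the compressor example from the docstring; zarr is required.

import zarr
import allel

allel.vcf_to_zarr('example.vcf', 'example.zarr',  # hypothetical paths
                  fields='*', overwrite=True,
                  compressor=zarr.Blosc(cname='zstd', clevel=1, shuffle=1))
callset = zarr.open_group('example.zarr', mode='r')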
cggh/scikit-allel
allel/io/vcf_read.py
iter_vcf_chunks
def iter_vcf_chunks(input,
                    fields=None,
                    exclude_fields=None,
                    types=None,
                    numbers=None,
                    alt_number=DEFAULT_ALT_NUMBER,
                    fills=None,
                    region=None,
                    tabix='tabix',
                    samples=None,
                    transformers=None,
                    buffer_size=DEFAULT_BUFFER_SIZE,
                    chunk_length=DEFAULT_CHUNK_LENGTH):
    """Iterate over chunks of data from a VCF file as NumPy arrays.

    Parameters
    ----------
    input : string
        {input}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}

    Returns
    -------
    fields : list of strings
        Normalised names of fields that will be extracted.
    samples : ndarray
        Samples for which data will be extracted.
    headers : VCFHeaders
        Tuple of metadata extracted from VCF headers.
    it : iterator
        Chunk iterator.

    """

    # setup common keyword args
    kwds = dict(fields=fields, exclude_fields=exclude_fields, types=types,
                numbers=numbers, alt_number=alt_number,
                chunk_length=chunk_length, fills=fills, samples=samples,
                region=region)

    # setup input stream
    stream = _setup_input_stream(input=input, region=region, tabix=tabix,
                                 buffer_size=buffer_size)

    # setup iterator
    fields, samples, headers, it = _iter_vcf_stream(stream, **kwds)

    # setup transformers
    if transformers is not None:
        # API flexibility
        if not isinstance(transformers, (list, tuple)):
            transformers = [transformers]
        for trans in transformers:
            fields = trans.transform_fields(fields)
        it = _chunk_iter_transform(it, transformers)

    return fields, samples, headers, it
python
Iterate over chunks of data from a VCF file as NumPy arrays.

    Parameters
    ----------
    input : string
        {input}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    samples : list of strings
        {samples}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}

    Returns
    -------
    fields : list of strings
        Normalised names of fields that will be extracted.
    samples : ndarray
        Samples for which data will be extracted.
    headers : VCFHeaders
        Tuple of metadata extracted from VCF headers.
    it : iterator
        Chunk iterator.
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L1080-L1158
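A streaming sketch (hypothetical path): process a VCF chunk by chunk instead of loading it whole, which keeps memory bounded for large files.

import allel

fields, samples, headers, it = allel.iter_vcf_chunks(
    'example.vcf',  # hypothetical path
    fields=['variants/POS', 'calldata/GT'],
    chunk_length=10000)
n_variants = 0
for chunk, chunk_length, chrom, pos in it:
    n_variants += chunk_length  # process chunk arrays here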
cggh/scikit-allel
allel/io/vcf_read.py
vcf_to_dataframe
def vcf_to_dataframe(input,
                     fields=None,
                     exclude_fields=None,
                     types=None,
                     numbers=None,
                     alt_number=DEFAULT_ALT_NUMBER,
                     fills=None,
                     region=None,
                     tabix='tabix',
                     transformers=None,
                     buffer_size=DEFAULT_BUFFER_SIZE,
                     chunk_length=DEFAULT_CHUNK_LENGTH,
                     log=None):
    """Read data from a VCF file into a pandas DataFrame.

    Parameters
    ----------
    input : string
        {input}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}

    Returns
    -------
    df : pandas.DataFrame

    """

    import pandas

    # samples requested?
    # noinspection PyTypeChecker
    _, fields = _prep_fields_param(fields)

    # setup
    fields, _, _, it = iter_vcf_chunks(
        input=input, fields=fields, exclude_fields=exclude_fields,
        types=types, numbers=numbers, alt_number=alt_number,
        buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
        region=region, tabix=tabix, samples=[], transformers=transformers
    )

    # setup progress logging
    if log is not None:
        it = _chunk_iter_progress(it, log, prefix='[vcf_to_dataframe]')

    # read all chunks into a list
    chunks = [d[0] for d in it]

    # setup output
    output = None

    if chunks:
        # concatenate chunks
        output = pandas.concat([_chunk_to_dataframe(fields, chunk)
                                for chunk in chunks])

    return output
python
Read data from a VCF file into a pandas DataFrame.

    Parameters
    ----------
    input : string
        {input}
    fields : list of strings, optional
        {fields}
    exclude_fields : list of strings, optional
        {exclude_fields}
    types : dict, optional
        {types}
    numbers : dict, optional
        {numbers}
    alt_number : int, optional
        {alt_number}
    fills : dict, optional
        {fills}
    region : string, optional
        {region}
    tabix : string, optional
        {tabix}
    transformers : list of transformer objects, optional
        {transformers}
    buffer_size : int, optional
        {buffer_size}
    chunk_length : int, optional
        {chunk_length}
    log : file-like, optional
        {log}

    Returns
    -------
    df : pandas.DataFrame
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L1801-L1881
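A tabular sketch (hypothetical path): pull variant-level fields into a pandas DataFrame; multi-valued fields such as ALT are expanded into numbered columns according to alt_number.

import allel

df = allel.vcf_to_dataframe('example.vcf',  # hypothetical path
                            fields=['CHROM', 'POS', 'REF', 'ALT', 'DP'],
                            alt_number=2)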