_id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 31–13.1k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q900
|
UriConnection._parse_uri_options
|
train
|
def _parse_uri_options(self, parsed_uri, use_ssl=False, ssl_options=None):
"""Parse the uri options.
:param parsed_uri: Parsed AMQP URI.
:param bool use_ssl: Enable SSL/TLS.
:param dict ssl_options: SSL/TLS options.
:return:
"""
ssl_options = ssl_options or {}
kwargs = urlparse.parse_qs(parsed_uri.query)
vhost = urlparse.unquote(parsed_uri.path[1:]) or DEFAULT_VIRTUAL_HOST
options = {
'ssl': use_ssl,
'virtual_host': vhost,
|
python
|
{
"resource": ""
}
|
q901
|
UriConnection._parse_ssl_options
|
train
|
def _parse_ssl_options(self, ssl_kwargs):
"""Parse TLS Options.
:param ssl_kwargs:
:rtype: dict
"""
ssl_options = {}
for key in ssl_kwargs:
if key not in compatibility.SSL_OPTIONS:
LOGGER.warning('invalid option: %s', key)
|
python
|
{
"resource": ""
}
|
q902
|
UriConnection._get_ssl_version
|
train
|
def _get_ssl_version(self, value):
"""Get the TLS Version.
:param str value:
:return: TLS Version
"""
return self._get_ssl_attribute(value, compatibility.SSL_VERSIONS,
|
python
|
{
"resource": ""
}
|
q903
|
UriConnection._get_ssl_validation
|
train
|
def _get_ssl_validation(self, value):
"""Get the TLS Validation option.
:param str value:
:return: TLS Certificate Options
"""
return self._get_ssl_attribute(value, compatibility.SSL_CERT_MAP,
|
python
|
{
"resource": ""
}
|
q904
|
UriConnection._get_ssl_attribute
|
train
|
def _get_ssl_attribute(value, mapping, default_value, warning_message):
"""Get the TLS attribute based on the compatibility mapping.
If no valid attribute can be found, fall-back on default and
display a warning.
:param str value:
|
python
|
{
"resource": ""
}
|
q905
|
ManagementApi.top
|
train
|
def top(self):
"""Top Processes.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
nodes
|
python
|
{
"resource": ""
}
|
q906
|
Basic.qos
|
train
|
def qos(self, prefetch_count=0, prefetch_size=0, global_=False):
"""Specify quality of service.
:param int prefetch_count: Prefetch window in messages
:param int/long prefetch_size: Prefetch window in octets
:param bool global_: Apply to entire connection
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_integer(prefetch_count):
|
python
|
{
"resource": ""
}
|
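The `qos` parameters documented above map directly onto a channel call. A minimal usage sketch, assuming a broker on localhost with the default guest credentials (hostname and credentials are illustrative):

```python
import amqpstorm

# Assumed local broker; host and credentials are illustrative.
connection = amqpstorm.Connection('localhost', 'guest', 'guest')
channel = connection.channel()

# Ask the server to deliver at most one unacknowledged message at a time.
channel.basic.qos(prefetch_count=1)
```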
q907
|
Basic.get
|
train
|
def get(self, queue='', no_ack=False, to_dict=False, auto_decode=True):
"""Fetch a single message.
:param str queue: Queue name
:param bool no_ack: No acknowledgement needed
:param bool to_dict: Should incoming messages be converted to a
dictionary before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
|
python
|
{
"resource": ""
}
|
q908
|
Basic.recover
|
train
|
def recover(self, requeue=False):
"""Redeliver unacknowledged messages.
:param bool requeue: Re-queue the messages
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
|
python
|
{
"resource": ""
}
|
q909
|
Basic.cancel
|
train
|
def cancel(self, consumer_tag=''):
"""Cancel a queue consumer.
:param str consumer_tag: Consumer tag
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(consumer_tag):
|
python
|
{
"resource": ""
}
|
q910
|
Basic.reject
|
train
|
def reject(self, delivery_tag=0, requeue=True):
"""Reject Message.
:param int/long delivery_tag: Server-assigned delivery tag
:param bool requeue: Re-queue the message
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not compatibility.is_integer(delivery_tag):
|
python
|
{
"resource": ""
}
|
q911
|
Basic._consume_add_and_get_tag
|
train
|
def _consume_add_and_get_tag(self, consume_rpc_result):
"""Add the tag to the channel and return it.
:param dict consume_rpc_result:
|
python
|
{
"resource": ""
}
|
q912
|
Basic._consume_rpc_request
|
train
|
def _consume_rpc_request(self, arguments, consumer_tag, exclusive, no_ack,
no_local, queue):
"""Create a Consume Frame and execute a RPC request.
:param str queue: Queue name
:param str consumer_tag: Consumer tag
:param bool no_local: Do not deliver own messages
:param bool no_ack: No acknowledgement needed
:param bool exclusive: Request exclusive access
|
python
|
{
"resource": ""
}
|
q913
|
Basic._validate_publish_parameters
|
train
|
def _validate_publish_parameters(body, exchange, immediate, mandatory,
properties, routing_key):
"""Validate Publish Parameters.
:param bytes|str|unicode body: Message payload
:param str routing_key: Message routing key
:param str exchange: The exchange to publish the message to
:param dict properties: Message properties
:param bool mandatory: Requires the message is published
:param bool immediate: Request immediate delivery
:raises AMQPInvalidArgument: Invalid Parameters
:return:
"""
if not compatibility.is_string(body):
raise AMQPInvalidArgument('body should be a string')
elif not compatibility.is_string(routing_key):
|
python
|
{
"resource": ""
}
|
q914
|
Basic._handle_utf8_payload
|
train
|
def _handle_utf8_payload(body, properties):
"""Update the Body and Properties to the appropriate encoding.
:param bytes|str|unicode body: Message payload
:param dict properties: Message properties
:return:
"""
if 'content_encoding' not in properties:
properties['content_encoding'] = 'utf-8'
|
python
|
{
"resource": ""
}
|
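The `_handle_utf8_payload` cell above is cut off after the `content_encoding` default; a minimal sketch of the idea it implements (encode unicode payloads and record the encoding), using an illustrative name rather than AMQPStorm's exact helper:

```python
def handle_utf8_payload_sketch(body, properties):
    """Encode unicode payloads as UTF-8 and record the encoding."""
    properties.setdefault('content_encoding', 'utf-8')
    if isinstance(body, str):
        body = body.encode('utf-8')
    return body
```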
q915
|
Basic._get_message
|
train
|
def _get_message(self, get_frame, auto_decode):
"""Get and return a message using a Basic.Get frame.
:param Basic.Get get_frame:
:param bool auto_decode: Auto-decode strings when possible.
:rtype: Message
"""
message_uuid = self._channel.rpc.register_request(
get_frame.valid_responses + ['ContentHeader', 'ContentBody']
)
try:
self._channel.write_frame(get_frame)
get_ok_frame = self._channel.rpc.get_request(message_uuid,
|
python
|
{
"resource": ""
}
|
q916
|
Basic._publish_confirm
|
train
|
def _publish_confirm(self, frames_out):
"""Confirm that message was published successfully.
:param list frames_out:
:rtype: bool
"""
confirm_uuid = self._channel.rpc.register_request(['Basic.Ack',
'Basic.Nack'])
self._channel.write_frames(frames_out)
|
python
|
{
"resource": ""
}
|
q917
|
Basic._create_content_body
|
train
|
def _create_content_body(self, body):
"""Split body based on the maximum frame size.
This function is based on code from Rabbitpy.
https://github.com/gmr/rabbitpy
:param bytes|str|unicode body: Message payload
:rtype: collections.Iterable
|
python
|
{
"resource": ""
}
|
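The frame-splitting described by `_create_content_body` can be expressed as a short generator. A sketch assuming a fixed frame size; the real method splits on the frame size negotiated with the server:

```python
FRAME_MAX = 131072  # assumed maximum frame payload size in bytes

def split_body(body, frame_max=FRAME_MAX):
    """Yield consecutive fragments of `body` no larger than `frame_max`."""
    for offset in range(0, len(body), frame_max):
        yield body[offset:offset + frame_max]
```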
q918
|
Basic._get_content_body
|
train
|
def _get_content_body(self, message_uuid, body_size):
"""Get Content Body using RPC requests.
:param str message_uuid: RPC identifier.
:param int body_size: Content Size.
:rtype: str
"""
body = bytes()
while len(body) < body_size:
|
python
|
{
"resource": ""
}
|
q919
|
Channel0.on_frame
|
train
|
def on_frame(self, frame_in):
"""Handle frames sent to Channel0.
:param frame_in: Amqp frame.
:return:
"""
LOGGER.debug('Frame Received: %s', frame_in.name)
if frame_in.name == 'Heartbeat':
return
elif frame_in.name == 'Connection.Close':
self._close_connection(frame_in)
elif frame_in.name == 'Connection.CloseOk':
self._close_connection_ok()
elif frame_in.name == 'Connection.Blocked':
self._blocked_connection(frame_in)
elif frame_in.name == 'Connection.Unblocked':
self._unblocked_connection()
elif frame_in.name == 'Connection.OpenOk':
self._set_connection_state(Stateful.OPEN)
|
python
|
{
"resource": ""
}
|
q920
|
Channel0._close_connection
|
train
|
def _close_connection(self, frame_in):
"""Connection Close.
:param specification.Connection.Close frame_in: Amqp frame.
:return:
"""
self._set_connection_state(Stateful.CLOSED)
if frame_in.reply_code != 200:
|
python
|
{
"resource": ""
}
|
q921
|
Channel0._blocked_connection
|
train
|
def _blocked_connection(self, frame_in):
"""Connection is Blocked.
:param frame_in:
:return:
"""
self.is_blocked = True
LOGGER.warning(
|
python
|
{
"resource": ""
}
|
q922
|
Channel0._send_start_ok
|
train
|
def _send_start_ok(self, frame_in):
"""Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return:
"""
mechanisms = try_utf8_decode(frame_in.mechanisms)
if 'EXTERNAL' in mechanisms:
mechanism = 'EXTERNAL'
credentials = '\0\0'
elif 'PLAIN' in mechanisms:
mechanism = 'PLAIN'
credentials = self._plain_credentials()
else:
exception = AMQPConnectionError(
'Unsupported Security Mechanism(s): %s' %
|
python
|
{
"resource": ""
}
|
q923
|
Channel0._send_tune_ok
|
train
|
def _send_tune_ok(self, frame_in):
"""Send Tune OK frame.
:param specification.Connection.Tune frame_in: Tune frame.
:return:
"""
self.max_allowed_channels = self._negotiate(frame_in.channel_max,
MAX_CHANNELS)
self.max_frame_size = self._negotiate(frame_in.frame_max,
MAX_FRAME_SIZE)
LOGGER.debug(
'Negotiated max frame
|
python
|
{
"resource": ""
}
|
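The `_negotiate` helper used by `_send_tune_ok` is not shown in this row. The usual AMQP tuning convention is to take the smaller of the two non-zero proposals, treating 0 as "no limit"; an illustrative sketch of that convention, not the library's verbatim code:

```python
def negotiate(server_value, client_value):
    """Pick the effective tuning value from server and client proposals.

    A value of 0 conventionally means "no limit", so a zero on either
    side falls back to the other side's proposal.
    """
    if not server_value:
        return client_value
    if not client_value:
        return server_value
    return min(server_value, client_value)
```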
q924
|
Channel0._send_open_connection
|
train
|
def _send_open_connection(self):
"""Send Open Connection frame.
:return:
"""
open_frame = specification.Connection.Open(
|
python
|
{
"resource": ""
}
|
q925
|
Channel0._write_frame
|
train
|
def _write_frame(self, frame_out):
"""Write a pamqp frame from Channel0.
:param frame_out: Amqp frame.
:return:
|
python
|
{
"resource": ""
}
|
q926
|
Channel0._client_properties
|
train
|
def _client_properties():
"""AMQPStorm Client Properties.
:rtype: dict
"""
return {
'product': 'AMQPStorm',
'platform': 'Python %s (%s)' % (platform.python_version(),
platform.python_implementation()),
'capabilities': {
'basic.nack': True,
'connection.blocked': True,
'publisher_confirms': True,
|
python
|
{
"resource": ""
}
|
q927
|
Channel.build_inbound_messages
|
train
|
def build_inbound_messages(self, break_on_empty=False, to_tuple=False,
auto_decode=True):
"""Build messages in the inbound queue.
:param bool break_on_empty: Should we break the loop when there are
no more messages in our inbound queue.
This does not guarantee that the upstream
queue is empty: if messages are consumed
faster than they are delivered, the inbound
queue may empty and the loop will stop.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
|
python
|
{
"resource": ""
}
|
q928
|
Channel.check_for_errors
|
train
|
def check_for_errors(self):
"""Check connection and channel for errors.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
try:
self._connection.check_for_errors()
except AMQPConnectionError:
self.set_state(self.CLOSED)
raise
|
python
|
{
"resource": ""
}
|
q929
|
Channel.confirm_deliveries
|
train
|
def confirm_deliveries(self):
"""Set the channel to confirm that each message has been
successfully delivered.
:raises AMQPChannelError: Raises if the channel encountered an error.
|
python
|
{
"resource": ""
}
|
q930
|
Channel.on_frame
|
train
|
def on_frame(self, frame_in):
"""Handle frame sent to this specific channel.
:param pamqp.Frame frame_in: Amqp frame.
:return:
"""
if self.rpc.on_frame(frame_in):
return
if frame_in.name in CONTENT_FRAME:
self._inbound.append(frame_in)
elif frame_in.name == 'Basic.Cancel':
self._basic_cancel(frame_in)
elif frame_in.name == 'Basic.CancelOk':
self.remove_consumer_tag(frame_in.consumer_tag)
elif frame_in.name == 'Basic.ConsumeOk':
self.add_consumer_tag(frame_in['consumer_tag'])
elif frame_in.name == 'Basic.Return':
self._basic_return(frame_in)
elif frame_in.name
|
python
|
{
"resource": ""
}
|
q931
|
Channel.process_data_events
|
train
|
def process_data_events(self, to_tuple=False, auto_decode=True):
"""Consume inbound messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
|
python
|
{
"resource": ""
}
|
q932
|
Channel.rpc_request
|
train
|
def rpc_request(self, frame_out, connection_adapter=None):
"""Perform a RPC Request.
:param specification.Frame frame_out: Amqp frame.
:rtype: dict
"""
|
python
|
{
"resource": ""
}
|
q933
|
Channel.start_consuming
|
train
|
def start_consuming(self, to_tuple=False, auto_decode=True):
"""Start consuming messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
|
python
|
{
"resource": ""
}
|
q934
|
Channel.stop_consuming
|
train
|
def stop_consuming(self):
"""Stop consuming messages.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self.consumer_tags:
|
python
|
{
"resource": ""
}
|
q935
|
Channel.write_frame
|
train
|
def write_frame(self, frame_out):
"""Write a pamqp frame from the current channel.
:param specification.Frame frame_out: A
|
python
|
{
"resource": ""
}
|
q936
|
Channel.write_frames
|
train
|
def write_frames(self, frames_out):
"""Write multiple pamqp frames from the current channel.
:param list frames_out:
|
python
|
{
"resource": ""
}
|
q937
|
Channel._basic_cancel
|
train
|
def _basic_cancel(self, frame_in):
"""Handle a Basic Cancel frame.
:param specification.Basic.Cancel frame_in: Amqp frame.
:return:
"""
LOGGER.warning(
'Received Basic.Cancel on
|
python
|
{
"resource": ""
}
|
q938
|
Channel._basic_return
|
train
|
def _basic_return(self, frame_in):
"""Handle a Basic Return Frame and treat it as an error.
:param specification.Basic.Return frame_in: Amqp frame.
:return:
"""
reply_text = try_utf8_decode(frame_in.reply_text)
message = (
"Message not delivered: %s (%s) to queue '%s' from exchange '%s'" %
(
reply_text,
frame_in.reply_code,
|
python
|
{
"resource": ""
}
|
q939
|
Channel._build_message
|
train
|
def _build_message(self, auto_decode):
"""Fetch and build a complete Message from the inbound queue.
:param bool auto_decode: Auto-decode strings when possible.
:rtype: Message
"""
with self.lock:
if len(self._inbound) < 2:
return None
headers = self._build_message_headers()
if not headers:
return None
basic_deliver, content_header = headers
body = self._build_message_body(content_header.body_size)
|
python
|
{
"resource": ""
}
|
q940
|
Channel._build_message_body
|
train
|
def _build_message_body(self, body_size):
"""Build the Message body from the inbound queue.
:rtype: str
"""
body = bytes()
while len(body) < body_size:
if not self._inbound:
self.check_for_errors()
sleep(IDLE_WAIT)
continue
|
python
|
{
"resource": ""
}
|
q941
|
User.create
|
train
|
def create(self, username, password, tags=''):
"""Create User.
:param str username: Username
:param str password: Password
:param str tags: Comma-separated list of tags (e.g. monitoring)
:rtype: None
"""
user_payload = json.dumps({
|
python
|
{
"resource": ""
}
|
q942
|
User.get_permission
|
train
|
def get_permission(self, username, virtual_host):
"""Get User permissions for the configured virtual host.
:param str username: Username
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
|
python
|
{
"resource": ""
}
|
q943
|
User.set_permission
|
train
|
def set_permission(self, username, virtual_host, configure_regex='.*',
write_regex='.*', read_regex='.*'):
"""Set User permissions for the configured virtual host.
:param str username: Username
:param str virtual_host: Virtual host name
:param str configure_regex: Permission pattern for configuration
operations for this user.
:param str write_regex: Permission pattern for write operations
for this user.
:param str read_regex: Permission pattern for read operations
|
python
|
{
"resource": ""
}
|
q944
|
User.delete_permission
|
train
|
def delete_permission(self, username, virtual_host):
"""Delete User permissions for the configured virtual host.
:param str username: Username
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
|
python
|
{
"resource": ""
}
|
q945
|
ScalableRpcServer.start_server
|
train
|
def start_server(self):
"""Start the RPC Server.
:return:
"""
self._stopped.clear()
if not self._connection or self._connection.is_closed:
self._create_connection()
while not self._stopped.is_set():
try:
# Check our connection for errors.
self._connection.check_for_errors()
self._update_consumers()
except amqpstorm.AMQPError as why:
|
python
|
{
"resource": ""
}
|
q946
|
ScalableRpcServer._update_consumers
|
train
|
def _update_consumers(self):
"""Update Consumers.
- Add more if requested.
- Make sure the consumers are healthy.
- Remove excess consumers.
:return:
"""
# Do we need to start more consumers?
consumer_to_start = \
min(max(self.number_of_consumers - len(self._consumers), 0), 2)
for _ in range(consumer_to_start):
consumer = Consumer(self.rpc_queue)
self._start_consumer(consumer)
self._consumers.append(consumer)
# Check that all our consumers are active.
|
python
|
{
"resource": ""
}
|
q947
|
HTTPClient.get
|
train
|
def get(self, path, payload=None, headers=None):
"""HTTP GET operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
|
python
|
{
"resource": ""
}
|
q948
|
HTTPClient.post
|
train
|
def post(self, path, payload=None, headers=None):
"""HTTP POST operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
|
python
|
{
"resource": ""
}
|
q949
|
HTTPClient.delete
|
train
|
def delete(self, path, payload=None, headers=None):
"""HTTP DELETE operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
|
python
|
{
"resource": ""
}
|
q950
|
HTTPClient.put
|
train
|
def put(self, path, payload=None, headers=None):
"""HTTP PUT operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
|
python
|
{
"resource": ""
}
|
q951
|
HTTPClient._request
|
train
|
def _request(self, method, path, payload=None, headers=None):
"""HTTP operation.
:param method: Operation type (e.g. post)
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
url = urlparse.urljoin(self._base_url, 'api/%s' % path)
|
python
|
{
"resource": ""
}
|
q952
|
HTTPClient._check_for_errors
|
train
|
def _check_for_errors(response, json_response):
"""Check payload for errors.
:param response: HTTP response
:param json_response: Json response
:raises ApiError: Raises if the remote server encountered an error.
:return:
"""
status_code = response.status_code
try:
response.raise_for_status()
except requests.HTTPError as why:
|
python
|
{
"resource": ""
}
|
q953
|
HealthChecks.get
|
train
|
def get(self, node=None):
"""Run basic healthchecks against the current node, or against a given
node.
Example response:
> {"status":"ok"}
> {"status":"failed","reason":"string"}
:param node: Node name
:raises ApiError: Raises if the remote server encountered an error.
|
python
|
{
"resource": ""
}
|
q954
|
mean_pairwise_difference
|
train
|
def mean_pairwise_difference(ac, an=None, fill=np.nan):
"""Calculate for each variant the mean number of pairwise differences
between chromosomes sampled from within a single population.
Parameters
----------
ac : array_like, int, shape (n_variants, n_alleles)
Allele counts array.
an : array_like, int, shape (n_variants,), optional
Allele numbers. If not provided, will be calculated from `ac`.
fill : float
Use this value where there are no pairs to compare (e.g.,
all allele calls are missing).
Returns
-------
mpd : ndarray, float, shape (n_variants,)
Notes
-----
The values returned by this function can be summed over a genome
region and divided by the number of accessible bases to estimate
nucleotide diversity, a.k.a. *pi*.
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 0],
... [0, 0, 0, 1],
... [0, 0, 1, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [0, 0, 1, 2],
... [0, 1, 1, 2],
... [0, 1, -1, -1]])
>>> ac = h.count_alleles()
>>> allel.mean_pairwise_difference(ac)
array([0. , 0.5 , 0.66666667, 0.5 , 0. ,
0.83333333, 0.83333333, 1. ])
See Also
--------
sequence_diversity, windowed_diversity
"""
# This function calculates the mean
|
python
|
{
"resource": ""
}
|
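The quantity described above can be reproduced from first principles: with `an` called chromosomes and per-allele counts `ac`, the mean pairwise difference is the fraction of chromosome pairs carrying different alleles. A minimal NumPy sketch (ignoring the optional `an` argument of the real function); it reproduces the docstring's example output:

```python
import numpy as np

def mean_pairwise_difference_sketch(ac, fill=np.nan):
    """Per-variant fraction of chromosome pairs carrying different alleles."""
    ac = np.asarray(ac, dtype=float)
    an = ac.sum(axis=1)                        # chromosomes called per variant
    n_pairs = an * (an - 1) / 2                # all pairs of called chromosomes
    n_same = (ac * (ac - 1) / 2).sum(axis=1)   # pairs sharing the same allele
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(n_pairs > 0, (n_pairs - n_same) / n_pairs, fill)
```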
q955
|
mean_pairwise_difference_between
|
train
|
def mean_pairwise_difference_between(ac1, ac2, an1=None, an2=None,
fill=np.nan):
"""Calculate for each variant the mean number of pairwise differences
between chromosomes sampled from two different populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
an1 : array_like, int, shape (n_variants,), optional
Allele numbers for the first population. If not provided, will be
calculated from `ac1`.
an2 : array_like, int, shape (n_variants,), optional
Allele numbers for the second population. If not provided, will be
calculated from `ac2`.
fill : float
Use this value where there are no pairs to compare (e.g.,
all allele calls are missing).
Returns
-------
mpd : ndarray, float, shape (n_variants,)
Notes
-----
The values returned by this function can be summed over a genome
region and divided by the number of accessible bases to estimate
nucleotide divergence between two populations, a.k.a. *Dxy*.
Examples
--------
>>> import allel
>>> h = allel.HaplotypeArray([[0, 0, 0, 0],
... [0, 0, 0, 1],
... [0, 0, 1, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [0, 0, 1, 2],
... [0, 1, 1, 2],
... [0, 1, -1, -1]])
>>> ac1 = h.count_alleles(subpop=[0, 1])
>>> ac2 = h.count_alleles(subpop=[2, 3])
>>> allel.mean_pairwise_difference_between(ac1, ac2)
|
python
|
{
"resource": ""
}
|
q956
|
watterson_theta
|
train
|
def watterson_theta(pos, ac, start=None, stop=None,
is_accessible=None):
"""Calculate the value of Watterson's estimator over a given region.
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
ac : array_like, int, shape (n_variants, n_alleles)
Allele counts array.
start : int, optional
The position at which to start (1-based). Defaults to the first position.
stop : int, optional
The position at which to stop (1-based). Defaults to the last position.
is_accessible : array_like, bool, shape (len(contig),), optional
Boolean array indicating accessibility status for all positions in the
chromosome/contig.
Returns
-------
theta_hat_w : float
Watterson's estimator (theta hat per base).
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 0], [0, 1]],
... [[0, 0], [1, 1]],
... [[0, 1], [1, 1]],
... [[1, 1], [1, 1]],
... [[0, 0], [1, 2]],
... [[0, 1], [1, 2]],
... [[0, 1], [-1, -1]],
... [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
>>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31)
>>> theta_hat_w
0.10557184750733138
"""
# check inputs
if not isinstance(pos, SortedIndex):
|
python
|
{
"resource": ""
}
|
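Numerically, Watterson's estimator per base is S / (a_n * L), where S is the number of segregating sites, a_n = 1 + 1/2 + ... + 1/(n-1) for n sampled chromosomes, and L is the number of accessible bases. A simplified sketch assuming a constant n at every site and no accessibility mask; with S=6, n=4 and L=31 it reproduces the docstring's value of 0.10557...:

```python
import numpy as np

def watterson_theta_sketch(ac, n, n_bases):
    """Watterson's theta per base: S / (a_n * L)."""
    ac = np.asarray(ac)
    # a site segregates if more than one allele is observed there
    n_seg = np.count_nonzero((ac > 0).sum(axis=1) > 1)
    a_n = np.sum(1.0 / np.arange(1, n))  # harmonic number H_{n-1}
    return n_seg / (a_n * n_bases)
```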
q957
|
tajima_d
|
train
|
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):
"""Calculate the value of Tajima's D over a given region.
Parameters
----------
ac : array_like, int, shape (n_variants, n_alleles)
Allele counts array.
pos : array_like, int, shape (n_items,), optional
Variant positions, using 1-based coordinates, in ascending order.
start : int, optional
The position at which to start (1-based). Defaults to the first position.
stop : int, optional
The position at which to stop (1-based). Defaults to the last position.
min_sites : int, optional
Minimum number of segregating sites for which to calculate a value. If
there are fewer, np.nan is returned. Defaults to 3.
Returns
-------
D : float
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 0], [0, 1]],
... [[0, 0], [1, 1]],
... [[0, 1], [1, 1]],
... [[1, 1], [1, 1]],
... [[0, 0], [1, 2]],
... [[0, 1], [1, 2]],
... [[0, 1],
|
python
|
{
"resource": ""
}
|
q958
|
moving_tajima_d
|
train
|
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
"""Calculate the value of Tajima's D in moving windows of `size` variants.
Parameters
----------
ac : array_like, int, shape (n_variants, n_alleles)
Allele counts array.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
min_sites : int, optional
Minimum number of segregating sites for which to calculate a value. If
there are fewer, np.nan is returned. Defaults to 3.
Returns
-------
d : ndarray, float, shape (n_windows,)
Tajima's D.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 0], [0, 1]],
...
|
python
|
{
"resource": ""
}
|
q959
|
sfs
|
train
|
def sfs(dac, n=None):
"""Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
|
python
|
{
"resource": ""
}
|
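The unfolded spectrum is simply a histogram of derived allele counts: element k is the number of variants with exactly k derived alleles. A minimal sketch:

```python
import numpy as np

def sfs_sketch(dac, n=None):
    """Histogram of derived allele counts."""
    dac = np.asarray(dac)
    if n is None:
        return np.bincount(dac)
    return np.bincount(dac, minlength=n + 1)
```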
q960
|
sfs_folded
|
train
|
def sfs_folded(ac, n=None):
"""Compute the folded site frequency spectrum given reference and
alternate allele counts at a set of biallelic variants.
Parameters
----------
ac : array_like, int, shape (n_variants, 2)
Allele counts array.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs_folded : ndarray, int, shape (n_chromosomes//2,)
Array where the kth element is the number of variant sites with a
minor allele count of k.
"""
# check input
ac, n
|
python
|
{
"resource": ""
}
|
q961
|
sfs_scaled
|
train
|
def sfs_scaled(dac, n=None):
"""Compute the site frequency spectrum scaled such that a constant value is
expected across the spectrum for neutral variation and constant
population size.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
|
python
|
{
"resource": ""
}
|
q962
|
scale_sfs
|
train
|
def scale_sfs(s):
"""Scale a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
Returns
-------
|
python
|
{
"resource": ""
}
|
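Scaling multiplies the kth spectrum class by k, so that under neutrality and constant population size the expectation is flat across the spectrum. A sketch:

```python
import numpy as np

def scale_sfs_sketch(s):
    """Multiply the kth class of a site frequency spectrum by k."""
    s = np.asarray(s)
    return s * np.arange(s.size)
```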
q963
|
sfs_folded_scaled
|
train
|
def sfs_folded_scaled(ac, n=None):
"""Compute the folded site frequency spectrum scaled such that a constant
value is expected across the spectrum for neutral variation and constant
population size.
Parameters
----------
ac : array_like, int, shape (n_variants, 2)
Allele counts array.
n : int, optional
|
python
|
{
"resource": ""
}
|
q964
|
scale_sfs_folded
|
train
|
def scale_sfs_folded(s, n):
"""Scale a folded site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes//2,)
Folded site frequency spectrum.
n : int
Number of chromosomes called.
Returns
-------
sfs_folded_scaled
|
python
|
{
"resource": ""
}
|
q965
|
joint_sfs
|
train
|
def joint_sfs(dac1, dac2, n1=None, n2=None):
"""Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
|
python
|
{
"resource": ""
}
|
q966
|
joint_sfs_folded
|
train
|
def joint_sfs_folded(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
Array where the (i, j)th element is the number of variant sites with a
minor allele count of i in the first population and j in the second
|
python
|
{
"resource": ""
}
|
q967
|
joint_sfs_scaled
|
train
|
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None):
"""Compute the joint site frequency spectrum between two populations,
scaled such that a constant value is expected across the spectrum for
neutral variation, constant population size and unrelated populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1)
|
python
|
{
"resource": ""
}
|
q968
|
scale_joint_sfs
|
train
|
def scale_joint_sfs(s):
"""Scale a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n1, n2)
Joint site frequency spectrum.
|
python
|
{
"resource": ""
}
|
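The two-dimensional analogue multiplies element (i, j) by i * j. A sketch:

```python
import numpy as np

def scale_joint_sfs_sketch(s):
    """Multiply element (i, j) of a joint SFS by i * j."""
    s = np.asarray(s)
    i = np.arange(s.shape[0])[:, None]
    j = np.arange(s.shape[1])[None, :]
    return s * i * j
```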
q969
|
joint_sfs_folded_scaled
|
train
|
def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None):
"""Compute the joint folded site frequency spectrum between two
populations, scaled such that a constant value is expected across the
spectrum for neutral variation, constant population size and unrelated
populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, 2)
Allele counts for the first population.
ac2 : array_like, int, shape (n_variants, 2)
Allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1)
|
python
|
{
"resource": ""
}
|
q970
|
scale_joint_sfs_folded
|
train
|
def scale_joint_sfs_folded(s, n1, n2):
"""Scale a folded joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2)
Folded joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2)
|
python
|
{
"resource": ""
}
|
q971
|
fold_sfs
|
train
|
def fold_sfs(s, n):
"""Fold a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum
n : int
Total number of chromosomes called.
Returns
-------
sfs_folded : ndarray, int
|
python
|
{
"resource": ""
}
|
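Folding combines the complementary allele-count classes k and n - k into minor-allele-count classes. A sketch assuming `s` holds integer counts for allele counts 0..n (padding if it is shorter):

```python
import numpy as np

def fold_sfs_sketch(s, n):
    """Fold a site frequency spectrum over n chromosomes."""
    s = np.asarray(s)
    full = np.zeros(n + 1, dtype=s.dtype)  # one class per count 0..n
    full[:s.shape[0]] = s
    nf = n // 2 + 1                        # number of minor-allele classes
    folded = full[:nf] + full[::-1][:nf]   # combine k with n - k
    if n % 2 == 0:
        folded[-1] //= 2                   # middle class was double-counted
    return folded
```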
q972
|
fold_joint_sfs
|
train
|
def fold_joint_sfs(s, n1, n2):
"""Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
"""
# check inputs
s = asarray_ndim(s, 2)
assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'
# need to check s has all entries up to m
if s.shape[0] < n1 + 1:
sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
sm[:s.shape[0]] = s
|
python
|
{
"resource": ""
}
|
q973
|
plot_sfs
|
train
|
def plot_sfs(s, yscale='log', bins=None, n=None,
clip_endpoints=True, label=None, plot_kwargs=None,
ax=None):
"""Plot a site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
import matplotlib.pyplot as plt
import scipy.stats
# check inputs
s = asarray_ndim(s, 1)
# setup axes
if ax is None:
fig, ax = plt.subplots()
# setup data
if bins is None:
if clip_endpoints:
x = np.arange(1, s.shape[0]-1)
y = s[1:-1]
else:
x = np.arange(s.shape[0])
y = s
else:
if clip_endpoints:
y, b, _ = scipy.stats.binned_statistic(
np.arange(1, s.shape[0]-1),
values=s[1:-1],
bins=bins,
|
python
|
{
"resource": ""
}
|
q974
|
plot_sfs_folded
|
train
|
def plot_sfs_folded(*args, **kwargs):
"""Plot a folded site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through
|
python
|
{
"resource": ""
}
|
q975
|
plot_sfs_scaled
|
train
|
def plot_sfs_scaled(*args, **kwargs):
"""Plot a scaled site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments,
|
python
|
{
"resource": ""
}
|
q976
|
plot_sfs_folded_scaled
|
train
|
def plot_sfs_folded_scaled(*args, **kwargs):
"""Plot a folded scaled site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax :
|
python
|
{
"resource": ""
}
|
q977
|
plot_joint_sfs_scaled
|
train
|
def plot_joint_sfs_scaled(*args, **kwargs):
"""Plot a scaled joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
imshow_kwargs : dict-like
Additional keyword arguments, passed through to ax.imshow().
|
python
|
{
"resource": ""
}
|
q978
|
plot_joint_sfs_folded_scaled
|
train
|
def plot_joint_sfs_folded_scaled(*args, **kwargs):
"""Plot a scaled folded joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2)
Joint site frequency spectrum.
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
|
python
|
{
"resource": ""
}
|
q979
|
_prep_fields_param
|
train
|
def _prep_fields_param(fields):
"""Prepare the `fields` parameter, and determine whether or not to store samples."""
store_samples = False
if fields is None:
# add samples by default
return True, None
if isinstance(fields, str):
fields = [fields]
else:
|
python
|
{
"resource": ""
}
|
q980
|
_chunk_iter_progress
|
train
|
def _chunk_iter_progress(it, log, prefix):
"""Wrap a chunk iterator for progress logging."""
n_variants = 0
before_all = time.time()
before_chunk = before_all
for chunk, chunk_length, chrom, pos in it:
after_chunk = time.time()
elapsed_chunk = after_chunk - before_chunk
elapsed = after_chunk - before_all
n_variants += chunk_length
chrom = text_type(chrom, 'utf8')
message = (
'%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' %
(prefix, n_variants, elapsed, elapsed_chunk,
int(chunk_length // elapsed_chunk))
|
python
|
{
"resource": ""
}
|
q981
|
read_vcf
|
train
|
def read_vcf(input,
fields=None,
exclude_fields=None,
rename_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix='tabix',
samples=None,
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH,
log=None):
"""Read data from a VCF file into NumPy arrays.
.. versionchanged:: 1.12.0
Now returns None if no variants are found in the VCF file or matching the
requested region.
Parameters
----------
input : string or file-like
{input}
fields : list of strings, optional
{fields}
exclude_fields : list of strings, optional
{exclude_fields}
rename_fields : dict[str -> str], optional
{rename_fields}
types : dict, optional
{types}
numbers : dict, optional
{numbers}
alt_number : int, optional
{alt_number}
fills : dict, optional
{fills}
region : string, optional
{region}
tabix : string, optional
{tabix}
samples : list of strings
{samples}
transformers : list of transformer objects, optional
{transformers}
buffer_size : int, optional
{buffer_size}
chunk_length : int, optional
{chunk_length}
log : file-like, optional
{log}
Returns
-------
data : dict[str, ndarray]
A dictionary holding arrays, or None if no variants were found.
|
python
|
{
"resource": ""
}
|
q982
|
vcf_to_npz
|
train
|
def vcf_to_npz(input, output,
compressed=True,
overwrite=False,
fields=None,
exclude_fields=None,
rename_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix=True,
samples=None,
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH,
log=None):
"""Read data from a VCF file into NumPy arrays and save as a .npz file.
.. versionchanged:: 1.12.0
Now will not create any output file if no variants are found in the VCF file or
matching the requested region.
Parameters
----------
input : string
{input}
output : string
{output}
compressed : bool, optional
If True (default), save with compression.
overwrite : bool, optional
{overwrite}
fields : list of strings, optional
|
python
|
{
"resource": ""
}
|
q983
|
vcf_to_hdf5
|
train
|
def vcf_to_hdf5(input, output,
group='/',
compression='gzip',
compression_opts=1,
shuffle=False,
overwrite=False,
vlen=True,
fields=None,
exclude_fields=None,
rename_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix='tabix',
samples=None,
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH,
chunk_width=DEFAULT_CHUNK_WIDTH,
log=None):
"""Read data from a VCF file and load into an HDF5 file.
.. versionchanged:: 1.12.0
Now will not create any output file if no variants are found in the VCF file or
matching the requested region.
Parameters
----------
input : string
{input}
output : string
{output}
group : string
Group within destination HDF5 file to store data in.
compression : string
Compression algorithm, e.g., 'gzip' (default).
compression_opts : int
Compression level, e.g., 1 (default).
shuffle : bool
Use byte shuffling, which may improve compression (default is False).
overwrite : bool
{overwrite}
vlen : bool
If True, store variable length strings. Note that there is considerable storage
overhead for variable length strings in HDF5, and leaving this option as True (
default) may lead to large file sizes. If False, all strings will be stored in
the HDF5 file as fixed length strings, even if they are specified as 'object'
type. In this case, the string length for any field with 'object' type will be
determined based on the maximum length of strings found in the first chunk,
and this may cause values to be truncated if longer values are found in later
chunks. To avoid truncation and large file sizes, manually set the type for all
string fields to an explicit fixed length string type, e.g., 'S10' for a field
where you know at most 10 characters are required.
fields : list of strings, optional
{fields}
exclude_fields : list of strings, optional
{exclude_fields}
rename_fields : dict[str -> str], optional
{rename_fields}
types : dict, optional
{types}
numbers : dict, optional
{numbers}
alt_number : int, optional
{alt_number}
fills : dict, optional
{fills}
region : string, optional
{region}
tabix : string, optional
{tabix}
samples : list of strings
{samples}
transformers : list of transformer objects, optional
{transformers}
buffer_size : int, optional
{buffer_size}
chunk_length : int, optional
{chunk_length}
chunk_width : int, optional
{chunk_width}
log : file-like, optional
{log}
|
python
|
{
"resource": ""
}
|
q984
|
vcf_to_zarr
|
train
|
def vcf_to_zarr(input, output,
group='/',
compressor='default',
overwrite=False,
fields=None,
exclude_fields=None,
rename_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix='tabix',
samples=None,
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH,
chunk_width=DEFAULT_CHUNK_WIDTH,
log=None):
"""Read data from a VCF file and load into a Zarr on-disk store.
.. versionchanged:: 1.12.0
Now will not create any output files if no variants are found in the VCF file or
matching the requested region.
Parameters
----------
input : string
{input}
output : string
{output}
group : string
Group within destination Zarr hierarchy to store data in.
compressor : compressor
Compression algorithm, e.g., zarr.Blosc(cname='zstd', clevel=1, shuffle=1).
overwrite : bool
{overwrite}
fields : list of strings, optional
{fields}
exclude_fields : list of strings, optional
{exclude_fields}
rename_fields : dict[str -> str], optional
{rename_fields}
types : dict, optional
{types}
numbers : dict, optional
{numbers}
alt_number : int, optional
{alt_number}
fills : dict, optional
{fills}
region : string, optional
{region}
tabix : string, optional
{tabix}
samples : list of strings
{samples}
transformers : list of transformer objects, optional
{transformers}
buffer_size : int, optional
{buffer_size}
chunk_length : int, optional
{chunk_length}
chunk_width : int, optional
{chunk_width}
log : file-like, optional
{log}
"""
import zarr
# samples requested?
# noinspection PyTypeChecker
store_samples, fields = _prep_fields_param(fields)
# setup chunk iterator
fields, samples, headers, it = iter_vcf_chunks(
input, fields=fields, exclude_fields=exclude_fields, types=types,
numbers=numbers, alt_number=alt_number, buffer_size=buffer_size,
chunk_length=chunk_length, fills=fills, region=region, tabix=tabix,
samples=samples, transformers=transformers
)
# handle field renaming
if rename_fields:
rename_fields, it = _do_rename(it, fields=fields,
rename_fields=rename_fields,
headers=headers)
# check for any case-insensitive duplicate fields
# https://github.com/cggh/scikit-allel/issues/215
ci_field_index = defaultdict(list)
for f in fields:
|
python
|
{
"resource": ""
}
|
q985
|
iter_vcf_chunks
|
train
|
def iter_vcf_chunks(input,
fields=None,
exclude_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix='tabix',
samples=None,
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH):
"""Iterate over chunks of data from a VCF file as NumPy arrays.
Parameters
----------
input : string
{input}
fields : list of strings, optional
{fields}
exclude_fields : list of strings, optional
{exclude_fields}
types : dict, optional
{types}
numbers : dict, optional
{numbers}
alt_number : int, optional
{alt_number}
fills : dict, optional
{fills}
region : string, optional
{region}
tabix : string, optional
{tabix}
samples : list of strings
{samples}
transformers : list of transformer objects, optional
{transformers}
buffer_size : int, optional
{buffer_size}
chunk_length : int, optional
{chunk_length}
Returns
-------
fields : list of strings
Normalised names of fields that will be extracted.
samples : ndarray
Samples for which
|
python
|
{
"resource": ""
}
|
q986
|
vcf_to_dataframe
|
train
|
def vcf_to_dataframe(input,
fields=None,
exclude_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix='tabix',
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH,
log=None):
"""Read data from a VCF file into a pandas DataFrame.
Parameters
----------
input : string
{input}
fields : list of strings, optional
{fields}
exclude_fields : list of strings, optional
{exclude_fields}
types : dict, optional
{types}
numbers : dict, optional
{numbers}
alt_number : int, optional
{alt_number}
fills : dict, optional
{fills}
region : string, optional
{region}
tabix : string, optional
{tabix}
transformers : list of transformer objects, optional
{transformers}
buffer_size : int, optional
{buffer_size}
chunk_length : int, optional
{chunk_length}
log : file-like, optional
{log}
Returns
-------
|
python
|
{
"resource": ""
}
|
q987
|
vcf_to_recarray
|
train
|
def vcf_to_recarray(input,
fields=None,
exclude_fields=None,
types=None,
numbers=None,
alt_number=DEFAULT_ALT_NUMBER,
fills=None,
region=None,
tabix='tabix',
transformers=None,
buffer_size=DEFAULT_BUFFER_SIZE,
chunk_length=DEFAULT_CHUNK_LENGTH,
log=None):
"""Read data from a VCF file into a NumPy recarray.
Parameters
----------
input : string
{input}
fields : list of strings, optional
{fields}
exclude_fields : list of strings, optional
{exclude_fields}
types : dict, optional
{types}
numbers : dict, optional
{numbers}
alt_number : int, optional
{alt_number}
fills : dict, optional
{fills}
region : string, optional
{region}
tabix : string, optional
{tabix}
transformers : list of transformer objects, optional
{transformers}
buffer_size : int, optional
{buffer_size}
chunk_length : int, optional
{chunk_length}
log : file-like, optional
{log}
Returns
-------
ra : np.rec.array
"""
|
python
|
{
"resource": ""
}
|
q988
|
write_fasta
|
train
|
def write_fasta(path, sequences, names, mode='w', width=80):
"""Write nucleotide sequences stored as numpy arrays to a FASTA file.
Parameters
----------
path : string
File path.
sequences : sequence of arrays
One or more ndarrays of dtype 'S1' containing the sequences.
names : sequence of strings
Names of the sequences.
mode : string, optional
Use 'a' to append to an existing file.
width : int, optional
Maximum line width.
"""
# check inputs
if isinstance(sequences, np.ndarray):
# single sequence
sequences = [sequences]
names = [names]
if len(sequences) != len(names):
raise ValueError('must provide the same number of sequences and names')
for sequence in sequences:
if sequence.dtype !=
|
python
|
{
"resource": ""
}
|
q989
|
heterozygosity_observed
|
train
|
def heterozygosity_observed(g, fill=np.nan):
"""Calculate the rate of observed heterozygosity for each variant.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
fill : float, optional
Use this value for variants where all calls are missing.
Returns
-------
ho : ndarray, float, shape (n_variants,)
Observed heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> allel.heterozygosity_observed(g)
array([0. , 0.33333333, 0. , 0.5
|
python
|
{
"resource": ""
}
|
q990
|
heterozygosity_expected
|
train
|
def heterozygosity_expected(af, ploidy, fill=np.nan):
"""Calculate the expected rate of heterozygosity for each variant
under Hardy-Weinberg equilibrium.
Parameters
----------
af : array_like, float, shape (n_variants, n_alleles)
Allele frequencies array.
ploidy : int
Sample ploidy.
fill : float, optional
Use this value for variants where allele frequencies do not sum to 1.
Returns
-------
he : ndarray, float, shape (n_variants,)
Expected heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> af
|
python
|
{
"resource": ""
}
|
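Under Hardy-Weinberg equilibrium, the probability that a call of the given ploidy is homozygous for allele i is p_i raised to the ploidy, so expected heterozygosity is one minus the sum of those terms. A sketch that omits the `fill` handling for frequencies not summing to 1:

```python
import numpy as np

def heterozygosity_expected_sketch(af, ploidy):
    """Expected heterozygosity under HWE: 1 - sum_i(p_i ** ploidy)."""
    af = np.asarray(af, dtype=float)
    return 1 - np.sum(af ** ploidy, axis=1)
```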
q991
|
inbreeding_coefficient
|
train
|
def inbreeding_coefficient(g, fill=np.nan):
"""Calculate the inbreeding coefficient for each variant.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
fill : float, optional
Use this value for variants where the expected heterozygosity is
zero.
Returns
-------
f : ndarray, float, shape (n_variants,)
Inbreeding coefficient.
Notes
-----
The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is
the observed heterozygosity and *He* is the expected heterozygosity.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> allel.inbreeding_coefficient(g)
|
python
|
{
"resource": ""
}
|
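Given per-variant observed and expected heterozygosity (e.g., from the two functions above), the Notes section's formula *1 - (Ho/He)* follows directly. A sketch over precomputed arrays:

```python
import numpy as np

def inbreeding_coefficient_sketch(ho, he, fill=np.nan):
    """F = 1 - Ho / He, with `fill` where He is zero."""
    ho = np.asarray(ho, dtype=float)
    he = np.asarray(he, dtype=float)
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(he > 0, 1 - ho / he, fill)
```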
q992
|
mendel_errors
|
train
|
def mendel_errors(parent_genotypes, progeny_genotypes):
"""Locate genotype calls not consistent with Mendelian transmission of
alleles.
Parameters
----------
parent_genotypes : array_like, int, shape (n_variants, 2, 2)
Genotype calls for the two parents.
progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2)
Genotype calls for the progeny.
Returns
-------
me : ndarray, int, shape (n_variants, n_progeny)
Count of Mendel errors for each progeny genotype call.
Examples
--------
The following are all consistent with Mendelian transmission. Note that a
value of 0 is returned for missing calls::
>>> import allel
>>> import numpy as np
>>> genotypes = np.array([
... # aa x aa -> aa
... [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]],
... [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]],
... [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]],
... # aa x ab -> aa or ab
... [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]],
... [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]],
... [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]],
... # aa x bb -> ab
... [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]],
... [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]],
... [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]],
... # aa x bc -> ab or ac
... [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]],
... [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]],
... # ab x ab -> aa or ab or bb
... [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]],
... [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]],
... [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]],
... # ab x bc -> ab or ac or bb or bc
... [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]],
... [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]],
... # ab x cd -> ac or ad or bc or bd
... [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]],
... ])
>>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:])
>>> me
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
The following are cases of 'non-parental' inheritance where one or two
alleles are found in the progeny that are not present in either parent.
Note that the number of errors may be 1 or 2 depending on the number of
non-parental alleles::
>>> genotypes = np.array([
... # aa x aa -> ab or ac or bb or cc
... [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]],
... [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]],
... [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]],
... # aa x ab -> ac or bc or cc
... [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]],
... [[0, 0], [0, 2],
|
python
|
{
"resource": ""
}
|
q993
|
paint_transmission
|
train
|
def paint_transmission(parent_haplotypes, progeny_haplotypes):
"""Paint haplotypes inherited from a single diploid parent according to
their allelic inheritance.
Parameters
----------
parent_haplotypes : array_like, int, shape (n_variants, 2)
Both haplotypes from a single diploid parent.
progeny_haplotypes : array_like, int, shape (n_variants, n_progeny)
Haplotypes found in progeny of the given parent, inherited from the
given parent. I.e., haplotypes from gametes of the given parent.
Returns
-------
painting : ndarray, uint8, shape (n_variants, n_progeny)
An array of integers coded as follows: 1 = allele inherited from
first parental haplotype; 2 = allele inherited from second parental
haplotype; 3 = reference allele, also carried by both parental
haplotypes; 4 = non-reference allele, also carried by both parental
haplotypes; 5 = non-parental allele; 6 = either or both parental
alleles missing; 7 = missing allele; 0 = undetermined.
Examples
--------
>>> import allel
>>> haplotypes = allel.HaplotypeArray([
... [0, 0, 0, 1, 2, -1],
... [0, 1, 0, 1, 2, -1],
... [1, 0, 0, 1, 2, -1],
... [1, 1, 0, 1, 2, -1],
... [0, 2, 0, 1, 2, -1],
... [0, -1, 0, 1, 2, -1],
... [-1, 1, 0, 1, 2, -1],
... [-1, -1, 0, 1, 2, -1],
... ], dtype='i1')
>>> painting = allel.paint_transmission(haplotypes[:, :2],
... haplotypes[:, 2:])
>>> painting
array([[3, 5, 5, 7],
[1, 2, 5, 7],
[2, 1, 5, 7],
[5, 4, 5, 7],
[1, 5, 2, 7],
[6, 6, 6, 7],
[6, 6, 6, 7],
[6, 6, 6, 7]], dtype=uint8)
"""
# check inputs
parent_haplotypes = HaplotypeArray(parent_haplotypes)
progeny_haplotypes = HaplotypeArray(progeny_haplotypes)
if parent_haplotypes.n_haplotypes != 2:
raise ValueError('exactly two parental haplotypes should be provided')
# convenience variables
parent1 = parent_haplotypes[:, 0, np.newaxis]
parent2 = parent_haplotypes[:, 1, np.newaxis]
progeny_is_missing = progeny_haplotypes < 0
parent_is_missing = np.any(parent_haplotypes < 0, axis=1)
# need this for broadcasting, but also need to retain original for later
parent_is_missing_bc = parent_is_missing[:, np.newaxis]
parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :])
parent_is_hom_ref = parent_diplotype.is_hom_ref()
parent_is_het = parent_diplotype.is_het()
|
python
|
{
"resource": ""
}
|
q994
|
phase_progeny_by_transmission
|
train
|
def phase_progeny_by_transmission(g):
"""Phase progeny genotypes from a trio or cross using Mendelian
transmission.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, 2)
Genotype array, with parents as first two columns and progeny as
remaining columns.
Returns
-------
g : ndarray, int8, shape (n_variants, n_samples, 2)
Genotype array with progeny phased where possible.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([
... [[0, 0], [0, 0], [0, 0]],
... [[1, 1], [1, 1], [1, 1]],
... [[0, 0], [1, 1], [0, 1]],
... [[1, 1], [0, 0], [0, 1]],
... [[0, 0], [0, 1], [0, 0]],
... [[0, 0], [0, 1], [0, 1]],
... [[0, 1], [0, 0], [0, 1]],
... [[0, 1], [0, 1], [0, 1]],
... [[0, 1], [1, 2], [0, 1]],
... [[1, 2], [0, 1], [1, 2]],
... [[0, 1], [2, 3], [0, 2]],
... [[2, 3], [0, 1], [1, 3]],
... [[0, 0], [0, 0], [-1, -1]],
... [[0, 0], [0, 0], [1, 1]],
... ], dtype='i1')
>>> g = allel.phase_progeny_by_transmission(g)
>>> print(g.to_str(row_threshold=None))
0/0 0/0 0|0
1/1 1/1 1|1
0/0 1/1 0|1
1/1 0/0 1|0
0/0 0/1 0|0
0/0 0/1 0|1
0/1 0/0 1|0
0/1 0/1 0/1
0/1 1/2 0|1
|
python
|
{
"resource": ""
}
|
q995
|
phase_parents_by_transmission
|
train
|
def phase_parents_by_transmission(g, window_size):
"""Phase parent genotypes from a trio or cross, given progeny genotypes
already phased by Mendelian transmission.
Parameters
----------
g : GenotypeArray
Genotype array, with parents as first two columns and progeny as
remaining columns, where progeny genotypes are already phased.
window_size : int
Number of previous heterozygous sites to include when phasing each
parent. A number somewhere between 10 and 100 may be appropriate,
depending on levels of heterozygosity and quality of data.
Returns
-------
g : GenotypeArray
Genotype array with parents phased where possible.
"""
# setup
|
python
|
{
"resource": ""
}
|
q996
|
phase_by_transmission
|
train
|
def phase_by_transmission(g, window_size, copy=True):
"""Phase genotypes in a trio or cross where possible using Mendelian
transmission.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, 2)
Genotype array, with parents as first two columns and progeny as
remaining columns.
window_size : int
Number of previous heterozygous sites to include when phasing each
parent. A number somewhere between 10 and 100 may be appropriate,
depending on levels of heterozygosity and quality of data.
copy : bool, optional
If False, attempt to phase genotypes in-place. Note that this is
only possible if the input array has int8 dtype, otherwise a copy is
always made regardless of this parameter.
Returns
|
python
|
{
"resource": ""
}
|
q997
|
get_blen_array
|
train
|
def get_blen_array(data, blen=None):
"""Try to guess a reasonable block length to use for block-wise iteration
over `data`."""
if blen is None:
if hasattr(data, 'chunklen'):
# bcolz carray
return data.chunklen
elif hasattr(data, 'chunks') and \
hasattr(data, 'shape') and \
hasattr(data.chunks, '__len__') and \
hasattr(data.shape, '__len__') and \
len(data.chunks) == len(data.shape):
|
python
|
{
"resource": ""
}
|
q998
|
h5fmem
|
train
|
def h5fmem(**kwargs):
"""Create an in-memory HDF5 file."""
# need a file name even though nothing is ever written
fn = tempfile.mktemp()
# file creation args
kwargs['mode'] = 'w'
|
python
|
{
"resource": ""
}
|
q999
|
h5ftmp
|
train
|
def h5ftmp(**kwargs):
"""Create an HDF5 file backed by a temporary file."""
# create temporary file name
suffix = kwargs.pop('suffix', '.h5')
prefix = kwargs.pop('prefix', 'scikit_allel_')
tempdir = kwargs.pop('dir', None)
fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir)
|
python
|
{
"resource": ""
}
|