Dataset columns:

- identifier: string (length 1 to 155)
- parameters: string (length 2 to 6.09k)
- docstring: string (length 11 to 63.4k)
- docstring_summary: string (length 0 to 63.4k)
- function: string (length 29 to 99.8k)
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string (1 class)
- docstring_language: string (length 2 to 7)
- docstring_language_predictions: string (length 18 to 23)
- is_langid_reliable: string (2 classes)

Each record below gives these fields in order, separated by `|`.
from_pandas_points_labels | (df) | Convert point ``pandas.DataFrame`` to list of tuples.
Convert a ``pandas.DataFrame`` of labeled data where each
timestamp is labeled by either 0 or 1 to a list of tuples
marking the start and end of each anomalous interval, making
the anomalies contextually defined.
Args:
df (DataFrame):
anomalies, passed as ``pandas.DataFrame``
containing two columns: timestamp and label.
Returns:
list:
tuple (start, end) timestamp.
Raises:
KeyError:
If the received ``pandas.DataFrame`` does not contain the required columns.
| Convert point ``pandas.DataFrame`` to list of tuples. | def from_pandas_points_labels(df):
""" Convert point ``pandas.DataFrame`` to list of tuples.
Convert a ``pandas.DataFrame`` of labeled data where each
timestamp is labeled by either 0 or 1 to a list of tuples
marking the start and end of each anomalous interval, making
the anomalies contextually defined.
Args:
df (DataFrame):
anomalies, passed as ``pandas.DataFrame``
containing two columns: timestamp and label.
Returns:
list:
tuple (start, end) timestamp.
Raises:
KeyError:
If the received ``pandas.DataFrame`` does not contain the required columns.
"""
require = ['timestamp', 'label']
columns = df.columns.tolist()
if not all(x in columns for x in require):
raise KeyError('{} not found in columns: {}.'.format(require, columns))
df = df[df['label'] == 1]
return from_pandas_points(df) | [
"def",
"from_pandas_points_labels",
"(",
"df",
")",
":",
"require",
"=",
"[",
"'timestamp'",
",",
"'label'",
"]",
"columns",
"=",
"df",
".",
"columns",
".",
"tolist",
"(",
")",
"if",
"not",
"all",
"(",
"x",
"in",
"columns",
"for",
"x",
"in",
"require",
")",
":",
"raise",
"KeyError",
"(",
"'{} not found in columns: {}.'",
".",
"format",
"(",
"require",
",",
"columns",
")",
")",
"df",
"=",
"df",
"[",
"df",
"[",
"'label'",
"]",
"==",
"1",
"]",
"return",
"from_pandas_points",
"(",
"df",
")"
] | [
99,
0
] | [
127,
33
] | python | en | ['en', 'lb', 'en'] | True |
from_list_points_labels | (labels) | Convert list of labels to list of tuples.
Convert a list of labels to a list of tuples
marking the start and end of each anomalous interval; a dummy
timestamp range is generated to serve as the index.
Args:
labels (list): contains binary labels [0, 1].
Returns:
list:
tuple (start, end) timestamp.
| Convert list of labels to list of tuples. | def from_list_points_labels(labels):
""" Convert list of labels to list of tuples.
Convert a list of labels to a list of tuples
marking the start and end of each anomalous interval; a dummy
timestamp range is generated to serve as the index.
Args:
labels (list): contains binary labels [0, 1].
Returns:
list:
tuple (start, end) timestamp.
"""
timestamps = np.arange(len(labels))
return from_pandas_points_labels(pd.DataFrame({"timestamp": timestamps, "label": labels})) | [
"def",
"from_list_points_labels",
"(",
"labels",
")",
":",
"timestamps",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"labels",
")",
")",
"return",
"from_pandas_points_labels",
"(",
"pd",
".",
"DataFrame",
"(",
"{",
"\"timestamp\"",
":",
"timestamps",
",",
"\"label\"",
":",
"labels",
"}",
")",
")"
] | [
130,
0
] | [
146,
94
] | python | en | ['en', 'en', 'en'] | True |
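As a stand-alone illustration of what these two label-conversion helpers produce, the sketch below groups consecutive anomalous timestamps into (start, end) tuples. It does not call the original library (the exact grouping rule of `from_pandas_points` is assumed here), but it mirrors the behaviour the docstrings describe:

```python
import numpy as np
import pandas as pd


def labels_to_intervals(df: pd.DataFrame) -> list:
    """Group consecutive rows with label == 1 into (start, end) timestamp tuples."""
    anomalous = df[df["label"] == 1].sort_values("timestamp")
    intervals = []
    start = prev = None
    for ts in anomalous["timestamp"]:
        if start is None:        # first anomalous point opens an interval
            start = prev = ts
        elif ts == prev + 1:     # consecutive point extends the current interval
            prev = ts
        else:                    # gap in the labels: close and reopen
            intervals.append((start, prev))
            start = prev = ts
    if start is not None:
        intervals.append((start, prev))
    return intervals


# Dummy timestamps, as from_list_points_labels builds them with np.arange.
labels = [0, 1, 1, 0, 0, 1, 0]
df = pd.DataFrame({"timestamp": np.arange(len(labels)), "label": labels})
print(labels_to_intervals(df))  # [(1, 2), (5, 5)]
```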
memory_stream_pump | (memory_send_stream, memory_receive_stream, *, max_bytes=None) | Take data out of the given :class:`MemorySendStream`'s internal buffer,
and put it into the given :class:`MemoryReceiveStream`'s internal buffer.
Args:
memory_send_stream (MemorySendStream): The stream to get data from.
memory_receive_stream (MemoryReceiveStream): The stream to put data into.
max_bytes (int or None): The maximum amount of data to transfer in this
call, or None to transfer all available data.
Returns:
True if it successfully transferred some data, or False if there was no
data to transfer.
This is used to implement :func:`memory_stream_one_way_pair` and
:func:`memory_stream_pair`; see the latter's docstring for an example
of how you might use it yourself.
| Take data out of the given :class:`MemorySendStream`'s internal buffer,
and put it into the given :class:`MemoryReceiveStream`'s internal buffer. | def memory_stream_pump(memory_send_stream, memory_receive_stream, *, max_bytes=None):
"""Take data out of the given :class:`MemorySendStream`'s internal buffer,
and put it into the given :class:`MemoryReceiveStream`'s internal buffer.
Args:
memory_send_stream (MemorySendStream): The stream to get data from.
memory_receive_stream (MemoryReceiveStream): The stream to put data into.
max_bytes (int or None): The maximum amount of data to transfer in this
call, or None to transfer all available data.
Returns:
True if it successfully transferred some data, or False if there was no
data to transfer.
This is used to implement :func:`memory_stream_one_way_pair` and
:func:`memory_stream_pair`; see the latter's docstring for an example
of how you might use it yourself.
"""
try:
data = memory_send_stream.get_data_nowait(max_bytes)
except _core.WouldBlock:
return False
try:
if not data:
memory_receive_stream.put_eof()
else:
memory_receive_stream.put_data(data)
except _core.ClosedResourceError:
raise _core.BrokenResourceError("MemoryReceiveStream was closed")
return True | [
"def",
"memory_stream_pump",
"(",
"memory_send_stream",
",",
"memory_receive_stream",
",",
"*",
",",
"max_bytes",
"=",
"None",
")",
":",
"try",
":",
"data",
"=",
"memory_send_stream",
".",
"get_data_nowait",
"(",
"max_bytes",
")",
"except",
"_core",
".",
"WouldBlock",
":",
"return",
"False",
"try",
":",
"if",
"not",
"data",
":",
"memory_receive_stream",
".",
"put_eof",
"(",
")",
"else",
":",
"memory_receive_stream",
".",
"put_data",
"(",
"data",
")",
"except",
"_core",
".",
"ClosedResourceError",
":",
"raise",
"_core",
".",
"BrokenResourceError",
"(",
"\"MemoryReceiveStream was closed\"",
")",
"return",
"True"
] | [
262,
0
] | [
292,
15
] | python | en | ['en', 'en', 'en'] | True |
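A minimal sketch of driving the pump by hand, assuming the `trio.testing` classes shown in these records are used directly:

```python
import trio
from trio.testing import MemorySendStream, MemoryReceiveStream, memory_stream_pump


async def main():
    send_stream = MemorySendStream()
    recv_stream = MemoryReceiveStream()

    # send_all only fills the sender's internal buffer; nothing flows yet
    # because no send_all_hook is installed on this bare stream.
    await send_stream.send_all(b"hello")

    # Move the buffered bytes across by hand.
    moved = memory_stream_pump(send_stream, recv_stream)
    assert moved  # True: some data was transferred

    print(await recv_stream.receive_some(1024))  # b"hello"


trio.run(main)
```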
memory_stream_one_way_pair | () | Create a connected, pure-Python, unidirectional stream with infinite
buffering and flexible configuration options.
You can think of this as being a no-operating-system-involved
Trio-streamsified version of :func:`os.pipe` (except that :func:`os.pipe`
returns the streams in the wrong order – we follow the superior convention
that data flows from left to right).
Returns:
A tuple (:class:`MemorySendStream`, :class:`MemoryReceiveStream`), where
the :class:`MemorySendStream` has its hooks set up so that it calls
:func:`memory_stream_pump` from its
:attr:`~MemorySendStream.send_all_hook` and
:attr:`~MemorySendStream.close_hook`.
The end result is that data automatically flows from the
:class:`MemorySendStream` to the :class:`MemoryReceiveStream`. But you're
also free to rearrange things however you like. For example, you can
temporarily set the :attr:`~MemorySendStream.send_all_hook` to None if you
want to simulate a stall in data transmission. Or see
:func:`memory_stream_pair` for a more elaborate example.
| Create a connected, pure-Python, unidirectional stream with infinite
buffering and flexible configuration options. | def memory_stream_one_way_pair():
"""Create a connected, pure-Python, unidirectional stream with infinite
buffering and flexible configuration options.
You can think of this as being a no-operating-system-involved
Trio-streamsified version of :func:`os.pipe` (except that :func:`os.pipe`
returns the streams in the wrong order – we follow the superior convention
that data flows from left to right).
Returns:
A tuple (:class:`MemorySendStream`, :class:`MemoryReceiveStream`), where
the :class:`MemorySendStream` has its hooks set up so that it calls
:func:`memory_stream_pump` from its
:attr:`~MemorySendStream.send_all_hook` and
:attr:`~MemorySendStream.close_hook`.
The end result is that data automatically flows from the
:class:`MemorySendStream` to the :class:`MemoryReceiveStream`. But you're
also free to rearrange things however you like. For example, you can
temporarily set the :attr:`~MemorySendStream.send_all_hook` to None if you
want to simulate a stall in data transmission. Or see
:func:`memory_stream_pair` for a more elaborate example.
"""
send_stream = MemorySendStream()
recv_stream = MemoryReceiveStream()
def pump_from_send_stream_to_recv_stream():
memory_stream_pump(send_stream, recv_stream)
async def async_pump_from_send_stream_to_recv_stream():
pump_from_send_stream_to_recv_stream()
send_stream.send_all_hook = async_pump_from_send_stream_to_recv_stream
send_stream.close_hook = pump_from_send_stream_to_recv_stream
return send_stream, recv_stream | [
"def",
"memory_stream_one_way_pair",
"(",
")",
":",
"send_stream",
"=",
"MemorySendStream",
"(",
")",
"recv_stream",
"=",
"MemoryReceiveStream",
"(",
")",
"def",
"pump_from_send_stream_to_recv_stream",
"(",
")",
":",
"memory_stream_pump",
"(",
"send_stream",
",",
"recv_stream",
")",
"async",
"def",
"async_pump_from_send_stream_to_recv_stream",
"(",
")",
":",
"pump_from_send_stream_to_recv_stream",
"(",
")",
"send_stream",
".",
"send_all_hook",
"=",
"async_pump_from_send_stream_to_recv_stream",
"send_stream",
".",
"close_hook",
"=",
"pump_from_send_stream_to_recv_stream",
"return",
"send_stream",
",",
"recv_stream"
] | [
295,
0
] | [
330,
35
] | python | en | ['en', 'en', 'en'] | True |
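For contrast with the manual pump above, a minimal sketch using the pre-wired one-way pair, where the installed `send_all_hook` moves the data automatically (assuming `trio.testing.memory_stream_one_way_pair` as documented here):

```python
import trio
from trio.testing import memory_stream_one_way_pair


async def main():
    send_stream, recv_stream = memory_stream_one_way_pair()

    # The send_all_hook installed by memory_stream_one_way_pair pumps the
    # data across automatically, so no manual memory_stream_pump call is needed.
    await send_stream.send_all(b"automatic")
    print(await recv_stream.receive_some(1024))  # b"automatic"


trio.run(main)
```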
memory_stream_pair | () | Create a connected, pure-Python, bidirectional stream with infinite
buffering and flexible configuration options.
This is a convenience function that creates two one-way streams using
:func:`memory_stream_one_way_pair`, and then uses
:class:`~trio.StapledStream` to combine them into a single bidirectional
stream.
This is like a no-operating-system-involved, Trio-streamsified version of
:func:`socket.socketpair`.
Returns:
A pair of :class:`~trio.StapledStream` objects that are connected so
that data automatically flows from one to the other in both directions.
After creating a stream pair, you can send data back and forth, which is
enough for simple tests::
left, right = memory_stream_pair()
await left.send_all(b"123")
assert await right.receive_some() == b"123"
await right.send_all(b"456")
assert await left.receive_some() == b"456"
But if you read the docs for :class:`~trio.StapledStream` and
:func:`memory_stream_one_way_pair`, you'll see that all the pieces
involved in wiring this up are public APIs, so you can adjust to suit the
requirements of your tests. For example, here's how to tweak a stream so
that data flowing from left to right trickles in one byte at a time (but
data flowing from right to left proceeds at full speed)::
left, right = memory_stream_pair()
async def trickle():
# left is a StapledStream, and left.send_stream is a MemorySendStream
# right is a StapledStream, and right.recv_stream is a MemoryReceiveStream
while memory_stream_pump(left.send_stream, right.recv_stream, max_bytes=1):
# Pause between each byte
await trio.sleep(1)
# Normally this send_all_hook calls memory_stream_pump directly without
# passing in a max_bytes. We replace it with our custom version:
left.send_stream.send_all_hook = trickle
And here's a simple test using our modified stream objects::
async def sender():
await left.send_all(b"12345")
await left.send_eof()
async def receiver():
async for data in right:
print(data)
async with trio.open_nursery() as nursery:
nursery.start_soon(sender)
nursery.start_soon(receiver)
By default, this will print ``b"12345"`` and then immediately exit; with
our trickle stream it instead sleeps 1 second, then prints ``b"1"``, then
sleeps 1 second, then prints ``b"2"``, etc.
Pro-tip: you can insert sleep calls (like in our example above) to
manipulate the flow of data across tasks... and then use
:class:`MockClock` and its :attr:`~MockClock.autojump_threshold`
functionality to keep your test suite running quickly.
If you want to stress test a protocol implementation, one nice trick is to
use the :mod:`random` module (preferably with a fixed seed) to move random
numbers of bytes at a time, and insert random sleeps in between them. You
can also set up a custom :attr:`~MemoryReceiveStream.receive_some_hook` if
you want to manipulate things on the receiving side, and not just the
sending side.
| Create a connected, pure-Python, bidirectional stream with infinite
buffering and flexible configuration options. | def memory_stream_pair():
"""Create a connected, pure-Python, bidirectional stream with infinite
buffering and flexible configuration options.
This is a convenience function that creates two one-way streams using
:func:`memory_stream_one_way_pair`, and then uses
:class:`~trio.StapledStream` to combine them into a single bidirectional
stream.
This is like a no-operating-system-involved, Trio-streamsified version of
:func:`socket.socketpair`.
Returns:
A pair of :class:`~trio.StapledStream` objects that are connected so
that data automatically flows from one to the other in both directions.
After creating a stream pair, you can send data back and forth, which is
enough for simple tests::
left, right = memory_stream_pair()
await left.send_all(b"123")
assert await right.receive_some() == b"123"
await right.send_all(b"456")
assert await left.receive_some() == b"456"
But if you read the docs for :class:`~trio.StapledStream` and
:func:`memory_stream_one_way_pair`, you'll see that all the pieces
involved in wiring this up are public APIs, so you can adjust to suit the
requirements of your tests. For example, here's how to tweak a stream so
that data flowing from left to right trickles in one byte at a time (but
data flowing from right to left proceeds at full speed)::
left, right = memory_stream_pair()
async def trickle():
# left is a StapledStream, and left.send_stream is a MemorySendStream
# right is a StapledStream, and right.recv_stream is a MemoryReceiveStream
while memory_stream_pump(left.send_stream, right.recv_stream, max_bytes=1):
# Pause between each byte
await trio.sleep(1)
# Normally this send_all_hook calls memory_stream_pump directly without
# passing in a max_bytes. We replace it with our custom version:
left.send_stream.send_all_hook = trickle
And here's a simple test using our modified stream objects::
async def sender():
await left.send_all(b"12345")
await left.send_eof()
async def receiver():
async for data in right:
print(data)
async with trio.open_nursery() as nursery:
nursery.start_soon(sender)
nursery.start_soon(receiver)
By default, this will print ``b"12345"`` and then immediately exit; with
our trickle stream it instead sleeps 1 second, then prints ``b"1"``, then
sleeps 1 second, then prints ``b"2"``, etc.
Pro-tip: you can insert sleep calls (like in our example above) to
manipulate the flow of data across tasks... and then use
:class:`MockClock` and its :attr:`~MockClock.autojump_threshold`
functionality to keep your test suite running quickly.
If you want to stress test a protocol implementation, one nice trick is to
use the :mod:`random` module (preferably with a fixed seed) to move random
numbers of bytes at a time, and insert random sleeps in between them. You
can also set up a custom :attr:`~MemoryReceiveStream.receive_some_hook` if
you want to manipulate things on the receiving side, and not just the
sending side.
"""
return _make_stapled_pair(memory_stream_one_way_pair) | [
"def",
"memory_stream_pair",
"(",
")",
":",
"return",
"_make_stapled_pair",
"(",
"memory_stream_one_way_pair",
")"
] | [
341,
0
] | [
415,
57
] | python | en | ['en', 'en', 'en'] | True |
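The trickle example from this docstring can be assembled into one runnable script. The sketch below follows the attribute names used in the docstring (`left.send_stream`, `right.recv_stream`, which may differ between trio versions) and adds a `MockClock` with `autojump_threshold=0` so the virtual one-second sleeps finish instantly:

```python
import trio
from trio.testing import MockClock, memory_stream_pair, memory_stream_pump


async def main():
    left, right = memory_stream_pair()

    async def trickle():
        # Move data from left to right one byte at a time, pausing in between.
        while memory_stream_pump(left.send_stream, right.recv_stream, max_bytes=1):
            await trio.sleep(1)

    # Replace the default send_all_hook with the trickling version.
    left.send_stream.send_all_hook = trickle

    async def sender():
        await left.send_all(b"12345")
        await left.send_eof()

    async def receiver():
        async for data in right:
            print(data)  # prints b"1", b"2", ... one byte per virtual second

    async with trio.open_nursery() as nursery:
        nursery.start_soon(sender)
        nursery.start_soon(receiver)


# autojump_threshold=0 makes the sleeps resolve instantly in virtual time.
trio.run(main, clock=MockClock(autojump_threshold=0))
```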
lockstep_stream_one_way_pair | () | Create a connected, pure Python, unidirectional stream where data flows
in lockstep.
Returns:
A tuple
(:class:`~trio.abc.SendStream`, :class:`~trio.abc.ReceiveStream`).
This stream has *absolutely no* buffering. Each call to
:meth:`~trio.abc.SendStream.send_all` will block until all the given data
has been returned by a call to
:meth:`~trio.abc.ReceiveStream.receive_some`.
This can be useful for testing flow control mechanisms in an extreme case,
or for setting up "clogged" streams to use with
:func:`check_one_way_stream` and friends.
In addition to fulfilling the :class:`~trio.abc.SendStream` and
:class:`~trio.abc.ReceiveStream` interfaces, the returned objects
also have a synchronous ``close`` method.
| Create a connected, pure Python, unidirectional stream where data flows
in lockstep. | def lockstep_stream_one_way_pair():
"""Create a connected, pure Python, unidirectional stream where data flows
in lockstep.
Returns:
A tuple
(:class:`~trio.abc.SendStream`, :class:`~trio.abc.ReceiveStream`).
This stream has *absolutely no* buffering. Each call to
:meth:`~trio.abc.SendStream.send_all` will block until all the given data
has been returned by a call to
:meth:`~trio.abc.ReceiveStream.receive_some`.
This can be useful for testing flow control mechanisms in an extreme case,
or for setting up "clogged" streams to use with
:func:`check_one_way_stream` and friends.
In addition to fulfilling the :class:`~trio.abc.SendStream` and
:class:`~trio.abc.ReceiveStream` interfaces, the returned objects
also have a synchronous ``close`` method.
"""
lbq = _LockstepByteQueue()
return _LockstepSendStream(lbq), _LockstepReceiveStream(lbq) | [
"def",
"lockstep_stream_one_way_pair",
"(",
")",
":",
"lbq",
"=",
"_LockstepByteQueue",
"(",
")",
"return",
"_LockstepSendStream",
"(",
"lbq",
")",
",",
"_LockstepReceiveStream",
"(",
"lbq",
")"
] | [
550,
0
] | [
574,
64
] | python | en | ['en', 'en', 'en'] | True |
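A small sketch of the lockstep behaviour described above: `send_all` does not return until the receiver has taken the data (APIs assumed as documented in this record):

```python
import trio
from trio.testing import lockstep_stream_one_way_pair


async def main():
    send_stream, recv_stream = lockstep_stream_one_way_pair()

    async def producer():
        # With zero buffering this call blocks until the receiver takes the data.
        await send_stream.send_all(b"ping")
        print("send_all returned")

    async def consumer():
        await trio.sleep(0.1)  # the producer stays blocked in send_all meanwhile
        print("received", await recv_stream.receive_some(16))

    async with trio.open_nursery() as nursery:
        nursery.start_soon(producer)
        nursery.start_soon(consumer)


trio.run(main)
```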
lockstep_stream_pair | () | Create a connected, pure-Python, bidirectional stream where data flows
in lockstep.
Returns:
A tuple (:class:`~trio.StapledStream`, :class:`~trio.StapledStream`).
This is a convenience function that creates two one-way streams using
:func:`lockstep_stream_one_way_pair`, and then uses
:class:`~trio.StapledStream` to combine them into a single bidirectional
stream.
| Create a connected, pure-Python, bidirectional stream where data flows
in lockstep. | def lockstep_stream_pair():
"""Create a connected, pure-Python, bidirectional stream where data flows
in lockstep.
Returns:
A tuple (:class:`~trio.StapledStream`, :class:`~trio.StapledStream`).
This is a convenience function that creates two one-way streams using
:func:`lockstep_stream_one_way_pair`, and then uses
:class:`~trio.StapledStream` to combine them into a single bidirectional
stream.
"""
return _make_stapled_pair(lockstep_stream_one_way_pair) | [
"def",
"lockstep_stream_pair",
"(",
")",
":",
"return",
"_make_stapled_pair",
"(",
"lockstep_stream_one_way_pair",
")"
] | [
577,
0
] | [
590,
59
] | python | en | ['en', 'en', 'en'] | True |
MemorySendStream.send_all | (self, data) | Places the given data into the object's internal buffer, and then
calls the :attr:`send_all_hook` (if any).
| Places the given data into the object's internal buffer, and then
calls the :attr:`send_all_hook` (if any). | async def send_all(self, data):
"""Places the given data into the object's internal buffer, and then
calls the :attr:`send_all_hook` (if any).
"""
# Execute two checkpoints so we have more of a chance to detect
# buggy user code that calls this twice at the same time.
with self._conflict_detector:
await _core.checkpoint()
await _core.checkpoint()
self._outgoing.put(data)
if self.send_all_hook is not None:
await self.send_all_hook() | [
"async",
"def",
"send_all",
"(",
"self",
",",
"data",
")",
":",
"# Execute two checkpoints so we have more of a chance to detect",
"# buggy user code that calls this twice at the same time.",
"with",
"self",
".",
"_conflict_detector",
":",
"await",
"_core",
".",
"checkpoint",
"(",
")",
"await",
"_core",
".",
"checkpoint",
"(",
")",
"self",
".",
"_outgoing",
".",
"put",
"(",
"data",
")",
"if",
"self",
".",
"send_all_hook",
"is",
"not",
"None",
":",
"await",
"self",
".",
"send_all_hook",
"(",
")"
] | [
110,
4
] | [
122,
42
] | python | en | ['en', 'en', 'en'] | True |
MemorySendStream.wait_send_all_might_not_block | (self) | Calls the :attr:`wait_send_all_might_not_block_hook` (if any), and
then returns immediately.
| Calls the :attr:`wait_send_all_might_not_block_hook` (if any), and
then returns immediately. | async def wait_send_all_might_not_block(self):
"""Calls the :attr:`wait_send_all_might_not_block_hook` (if any), and
then returns immediately.
"""
# Execute two checkpoints so we have more of a chance to detect
# buggy user code that calls this twice at the same time.
with self._conflict_detector:
await _core.checkpoint()
await _core.checkpoint()
# check for being closed:
self._outgoing.put(b"")
if self.wait_send_all_might_not_block_hook is not None:
await self.wait_send_all_might_not_block_hook() | [
"async",
"def",
"wait_send_all_might_not_block",
"(",
"self",
")",
":",
"# Execute two checkpoints so we have more of a chance to detect",
"# buggy user code that calls this twice at the same time.",
"with",
"self",
".",
"_conflict_detector",
":",
"await",
"_core",
".",
"checkpoint",
"(",
")",
"await",
"_core",
".",
"checkpoint",
"(",
")",
"# check for being closed:",
"self",
".",
"_outgoing",
".",
"put",
"(",
"b\"\"",
")",
"if",
"self",
".",
"wait_send_all_might_not_block_hook",
"is",
"not",
"None",
":",
"await",
"self",
".",
"wait_send_all_might_not_block_hook",
"(",
")"
] | [
124,
4
] | [
137,
63
] | python | en | ['en', 'en', 'en'] | True |
MemorySendStream.close | (self) | Marks this stream as closed, and then calls the :attr:`close_hook`
(if any).
| Marks this stream as closed, and then calls the :attr:`close_hook`
(if any). | def close(self):
"""Marks this stream as closed, and then calls the :attr:`close_hook`
(if any).
"""
# XXX should this cancel any pending calls to the send_all_hook and
# wait_send_all_might_not_block_hook? Those are the only places where
# send_all and wait_send_all_might_not_block can be blocked.
#
# The way we set things up, send_all_hook is memory_stream_pump, and
# wait_send_all_might_not_block_hook is unset. memory_stream_pump is
# synchronous. So normally, send_all and wait_send_all_might_not_block
# cannot block at all.
self._outgoing.close()
if self.close_hook is not None:
self.close_hook() | [
"def",
"close",
"(",
"self",
")",
":",
"# XXX should this cancel any pending calls to the send_all_hook and",
"# wait_send_all_might_not_block_hook? Those are the only places where",
"# send_all and wait_send_all_might_not_block can be blocked.",
"#",
"# The way we set things up, send_all_hook is memory_stream_pump, and",
"# wait_send_all_might_not_block_hook is unset. memory_stream_pump is",
"# synchronous. So normally, send_all and wait_send_all_might_not_block",
"# cannot block at all.",
"self",
".",
"_outgoing",
".",
"close",
"(",
")",
"if",
"self",
".",
"close_hook",
"is",
"not",
"None",
":",
"self",
".",
"close_hook",
"(",
")"
] | [
139,
4
] | [
154,
29
] | python | en | ['en', 'en', 'en'] | True |
MemorySendStream.aclose | (self) | Same as :meth:`close`, but async. | Same as :meth:`close`, but async. | async def aclose(self):
"""Same as :meth:`close`, but async."""
self.close()
await _core.checkpoint() | [
"async",
"def",
"aclose",
"(",
"self",
")",
":",
"self",
".",
"close",
"(",
")",
"await",
"_core",
".",
"checkpoint",
"(",
")"
] | [
156,
4
] | [
159,
32
] | python | en | ['en', 'gd', 'en'] | True |
MemorySendStream.get_data | (self, max_bytes=None) | Retrieves data from the internal buffer, blocking if necessary.
Args:
max_bytes (int or None): The maximum amount of data to
retrieve. None (the default) means to retrieve all the data
that's present (but still blocks until at least one byte is
available).
Returns:
If this stream has been closed, an empty bytearray. Otherwise, the
requested data.
| Retrieves data from the internal buffer, blocking if necessary. | async def get_data(self, max_bytes=None):
"""Retrieves data from the internal buffer, blocking if necessary.
Args:
max_bytes (int or None): The maximum amount of data to
retrieve. None (the default) means to retrieve all the data
that's present (but still blocks until at least one byte is
available).
Returns:
If this stream has been closed, an empty bytearray. Otherwise, the
requested data.
"""
return await self._outgoing.get(max_bytes) | [
"async",
"def",
"get_data",
"(",
"self",
",",
"max_bytes",
"=",
"None",
")",
":",
"return",
"await",
"self",
".",
"_outgoing",
".",
"get",
"(",
"max_bytes",
")"
] | [
161,
4
] | [
175,
50
] | python | en | ['en', 'en', 'en'] | True |
MemorySendStream.get_data_nowait | (self, max_bytes=None) | Retrieves data from the internal buffer, but doesn't block.
See :meth:`get_data` for details.
Raises:
trio.WouldBlock: if no data is available to retrieve.
| Retrieves data from the internal buffer, but doesn't block. | def get_data_nowait(self, max_bytes=None):
"""Retrieves data from the internal buffer, but doesn't block.
See :meth:`get_data` for details.
Raises:
trio.WouldBlock: if no data is available to retrieve.
"""
return self._outgoing.get_nowait(max_bytes) | [
"def",
"get_data_nowait",
"(",
"self",
",",
"max_bytes",
"=",
"None",
")",
":",
"return",
"self",
".",
"_outgoing",
".",
"get_nowait",
"(",
"max_bytes",
")"
] | [
177,
4
] | [
186,
51
] | python | en | ['en', 'lb', 'en'] | True |
MemoryReceiveStream.receive_some | (self, max_bytes=None) | Calls the :attr:`receive_some_hook` (if any), and then retrieves
data from the internal buffer, blocking if necessary.
| Calls the :attr:`receive_some_hook` (if any), and then retrieves
data from the internal buffer, blocking if necessary. | async def receive_some(self, max_bytes=None):
"""Calls the :attr:`receive_some_hook` (if any), and then retrieves
data from the internal buffer, blocking if necessary.
"""
# Execute two checkpoints so we have more of a chance to detect
# buggy user code that calls this twice at the same time.
with self._conflict_detector:
await _core.checkpoint()
await _core.checkpoint()
if self._closed:
raise _core.ClosedResourceError
if self.receive_some_hook is not None:
await self.receive_some_hook()
# self._incoming's closure state tracks whether we got an EOF.
# self._closed tracks whether we, ourselves, are closed.
# self.close() sends an EOF to wake us up and sets self._closed,
# so after we wake up we have to check self._closed again.
data = await self._incoming.get(max_bytes)
if self._closed:
raise _core.ClosedResourceError
return data | [
"async",
"def",
"receive_some",
"(",
"self",
",",
"max_bytes",
"=",
"None",
")",
":",
"# Execute two checkpoints so we have more of a chance to detect",
"# buggy user code that calls this twice at the same time.",
"with",
"self",
".",
"_conflict_detector",
":",
"await",
"_core",
".",
"checkpoint",
"(",
")",
"await",
"_core",
".",
"checkpoint",
"(",
")",
"if",
"self",
".",
"_closed",
":",
"raise",
"_core",
".",
"ClosedResourceError",
"if",
"self",
".",
"receive_some_hook",
"is",
"not",
"None",
":",
"await",
"self",
".",
"receive_some_hook",
"(",
")",
"# self._incoming's closure state tracks whether we got an EOF.",
"# self._closed tracks whether we, ourselves, are closed.",
"# self.close() sends an EOF to wake us up and sets self._closed,",
"# so after we wake up we have to check self._closed again.",
"data",
"=",
"await",
"self",
".",
"_incoming",
".",
"get",
"(",
"max_bytes",
")",
"if",
"self",
".",
"_closed",
":",
"raise",
"_core",
".",
"ClosedResourceError",
"return",
"data"
] | [
215,
4
] | [
236,
23
] | python | en | ['en', 'en', 'en'] | True |
MemoryReceiveStream.close | (self) | Discards any pending data from the internal buffer, and marks this
stream as closed.
| Discards any pending data from the internal buffer, and marks this
stream as closed. | def close(self):
"""Discards any pending data from the internal buffer, and marks this
stream as closed.
"""
self._closed = True
self._incoming.close_and_wipe()
if self.close_hook is not None:
self.close_hook() | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"_closed",
"=",
"True",
"self",
".",
"_incoming",
".",
"close_and_wipe",
"(",
")",
"if",
"self",
".",
"close_hook",
"is",
"not",
"None",
":",
"self",
".",
"close_hook",
"(",
")"
] | [
238,
4
] | [
246,
29
] | python | en | ['en', 'en', 'en'] | True |
MemoryReceiveStream.aclose | (self) | Same as :meth:`close`, but async. | Same as :meth:`close`, but async. | async def aclose(self):
"""Same as :meth:`close`, but async."""
self.close()
await _core.checkpoint() | [
"async",
"def",
"aclose",
"(",
"self",
")",
":",
"self",
".",
"close",
"(",
")",
"await",
"_core",
".",
"checkpoint",
"(",
")"
] | [
248,
4
] | [
251,
32
] | python | en | ['en', 'gd', 'en'] | True |
MemoryReceiveStream.put_data | (self, data) | Appends the given data to the internal buffer. | Appends the given data to the internal buffer. | def put_data(self, data):
"""Appends the given data to the internal buffer."""
self._incoming.put(data) | [
"def",
"put_data",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_incoming",
".",
"put",
"(",
"data",
")"
] | [
253,
4
] | [
255,
32
] | python | en | ['en', 'en', 'en'] | True |
MemoryReceiveStream.put_eof | (self) | Adds an end-of-file marker to the internal buffer. | Adds an end-of-file marker to the internal buffer. | def put_eof(self):
"""Adds an end-of-file marker to the internal buffer."""
self._incoming.close() | [
"def",
"put_eof",
"(",
"self",
")",
":",
"self",
".",
"_incoming",
".",
"close",
"(",
")"
] | [
257,
4
] | [
259,
30
] | python | en | ['en', 'en', 'en'] | True |
unformat_pytest_explanation | (s) |
Undo _pytest.assertion.util.format_explanation
|
Undo _pytest.assertion.util.format_explanation
| def unformat_pytest_explanation(s):
"""
Undo _pytest.assertion.util.format_explanation
"""
return s.replace("\\n", "\n") | [
"def",
"unformat_pytest_explanation",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"\"\\\\n\"",
",",
"\"\\n\"",
")"
] | [
28,
0
] | [
32,
33
] | python | en | ['en', 'error', 'th'] | False |
_is_bool_supported | () |
Type "bool" is not supported before 2.9
|
Type "bool" is not supported before 2.9
| def _is_bool_supported():
"""
Type "bool" is not supported before 2.9
"""
try:
from pytest import __version__
from distutils import version
return version.LooseVersion(str(__version__)) >= version.LooseVersion("2.9")
except ImportError:
return False | [
"def",
"_is_bool_supported",
"(",
")",
":",
"try",
":",
"from",
"pytest",
"import",
"__version__",
"from",
"distutils",
"import",
"version",
"return",
"version",
".",
"LooseVersion",
"(",
"str",
"(",
"__version__",
")",
")",
">=",
"version",
".",
"LooseVersion",
"(",
"\"2.9\"",
")",
"except",
"ImportError",
":",
"return",
"False"
] | [
64,
0
] | [
73,
20
] | python | en | ['en', 'error', 'th'] | False |
EchoTeamCityMessages.pytest_runtest_logreport | (self, report) |
:type report: _pytest.runner.TestReport
|
:type report: _pytest.runner.TestReport
| def pytest_runtest_logreport(self, report):
"""
:type report: _pytest.runner.TestReport
"""
test_id = self.format_test_id(report.nodeid, report.location)
duration = timedelta(seconds=report.duration)
if report.passed:
# Do not report passed setup/teardown if no output
if report.when == 'call':
self.ensure_test_start_reported(test_id)
if not self.skip_passed_output:
self.report_test_output(report, test_id)
self.report_test_finished(test_id, duration)
else:
if self.report_has_output(report) and not self.skip_passed_output:
block_name = "test " + report.when
self.teamcity.blockOpened(block_name, flowId=test_id)
self.report_test_output(report, test_id)
self.teamcity.blockClosed(block_name, flowId=test_id)
elif report.failed:
if report.when == 'call':
self.report_test_failure(test_id, report)
elif report.when == 'setup':
if self.report_has_output(report):
self.teamcity.blockOpened("test setup", flowId=test_id)
self.report_test_output(report, test_id)
self.teamcity.blockClosed("test setup", flowId=test_id)
self.report_test_failure(test_id, report, message="test setup failed", report_output=False)
elif report.when == 'teardown':
# Report failed teardown as a separate test as original test is already finished
self.report_test_failure(test_id + "_teardown", report)
elif report.skipped:
self.report_test_skip(test_id, report) | [
"def",
"pytest_runtest_logreport",
"(",
"self",
",",
"report",
")",
":",
"test_id",
"=",
"self",
".",
"format_test_id",
"(",
"report",
".",
"nodeid",
",",
"report",
".",
"location",
")",
"duration",
"=",
"timedelta",
"(",
"seconds",
"=",
"report",
".",
"duration",
")",
"if",
"report",
".",
"passed",
":",
"# Do not report passed setup/teardown if no output",
"if",
"report",
".",
"when",
"==",
"'call'",
":",
"self",
".",
"ensure_test_start_reported",
"(",
"test_id",
")",
"if",
"not",
"self",
".",
"skip_passed_output",
":",
"self",
".",
"report_test_output",
"(",
"report",
",",
"test_id",
")",
"self",
".",
"report_test_finished",
"(",
"test_id",
",",
"duration",
")",
"else",
":",
"if",
"self",
".",
"report_has_output",
"(",
"report",
")",
"and",
"not",
"self",
".",
"skip_passed_output",
":",
"block_name",
"=",
"\"test \"",
"+",
"report",
".",
"when",
"self",
".",
"teamcity",
".",
"blockOpened",
"(",
"block_name",
",",
"flowId",
"=",
"test_id",
")",
"self",
".",
"report_test_output",
"(",
"report",
",",
"test_id",
")",
"self",
".",
"teamcity",
".",
"blockClosed",
"(",
"block_name",
",",
"flowId",
"=",
"test_id",
")",
"elif",
"report",
".",
"failed",
":",
"if",
"report",
".",
"when",
"==",
"'call'",
":",
"self",
".",
"report_test_failure",
"(",
"test_id",
",",
"report",
")",
"elif",
"report",
".",
"when",
"==",
"'setup'",
":",
"if",
"self",
".",
"report_has_output",
"(",
"report",
")",
":",
"self",
".",
"teamcity",
".",
"blockOpened",
"(",
"\"test setup\"",
",",
"flowId",
"=",
"test_id",
")",
"self",
".",
"report_test_output",
"(",
"report",
",",
"test_id",
")",
"self",
".",
"teamcity",
".",
"blockClosed",
"(",
"\"test setup\"",
",",
"flowId",
"=",
"test_id",
")",
"self",
".",
"report_test_failure",
"(",
"test_id",
",",
"report",
",",
"message",
"=",
"\"test setup failed\"",
",",
"report_output",
"=",
"False",
")",
"elif",
"report",
".",
"when",
"==",
"'teardown'",
":",
"# Report failed teardown as a separate test as original test is already finished",
"self",
".",
"report_test_failure",
"(",
"test_id",
"+",
"\"_teardown\"",
",",
"report",
")",
"elif",
"report",
".",
"skipped",
":",
"self",
".",
"report_test_skip",
"(",
"test_id",
",",
"report",
")"
] | [
332,
4
] | [
367,
50
] | python | en | ['en', 'error', 'th'] | False |
test_notebook_execution_with_pandas_backend | (
titanic_data_context_no_data_docs_no_checkpoint_store,
) |
This tests that the notebook is written to disk and executes without error.
To set this test up we:
- create a scaffold notebook
- verify that no validations have happened
We then:
- execute that notebook (Note this will raise various errors like
CellExecutionError if any cell in the notebook fails
- create a new context from disk
- verify that a validation has been run with our expectation suite
|
This tests that the notebook is written to disk and executes without error. | def test_notebook_execution_with_pandas_backend(
titanic_data_context_no_data_docs_no_checkpoint_store,
):
"""
This tests that the notebook is written to disk and executes without error.
To set this test up we:
- create a scaffold notebook
- verify that no validations have happened
We then:
- execute that notebook (Note this will raise various errors like
CellExecutionError if any cell in the notebook fails)
- create a new context from disk
- verify that a validation has been run with our expectation suite
"""
# Since we'll run the notebook, we use a context with no data docs to avoid
# the renderer's default behavior of building and opening docs, which is not
# part of this test.
context = titanic_data_context_no_data_docs_no_checkpoint_store
root_dir = context.root_directory
uncommitted_dir = os.path.join(root_dir, "uncommitted")
suite_name = "my_suite"
suite = context.create_expectation_suite(suite_name)
csv_path = os.path.join(root_dir, "..", "data", "Titanic.csv")
batch_kwargs = {"datasource": "mydatasource", "path": csv_path}
# Sanity check test setup
assert context.list_expectation_suite_names() == [suite_name]
assert context.list_datasources() == [
{
"module_name": "great_expectations.datasource",
"class_name": "PandasDatasource",
"data_asset_type": {
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
"batch_kwargs_generators": {
"mygenerator": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data",
}
},
"name": "mydatasource",
}
]
assert context.get_validation_result(suite_name) == {}
notebook_path = os.path.join(uncommitted_dir, f"{suite_name}.ipynb")
assert not os.path.isfile(notebook_path)
# Create notebook
renderer = SuiteScaffoldNotebookRenderer(
titanic_data_context_no_data_docs_no_checkpoint_store, suite, batch_kwargs
)
renderer.render_to_disk(notebook_path)
assert os.path.isfile(notebook_path)
with open(notebook_path) as f:
nb = nbformat.read(f, as_version=4)
# Run notebook
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": uncommitted_dir}})
# Useful to inspect executed notebook
output_notebook = os.path.join(uncommitted_dir, "output.ipynb")
with open(output_notebook, "w") as f:
nbformat.write(nb, f)
# Assertions about output
context = DataContext(root_dir)
obs_validation_result = context.get_validation_result(suite_name)
assert obs_validation_result.statistics == {
"evaluated_expectations": 2,
"successful_expectations": 2,
"unsuccessful_expectations": 0,
"success_percent": 100,
}
suite = context.get_expectation_suite(suite_name)
assert suite.expectations
(
columns_with_expectations,
expectations_from_suite,
) = get_set_of_columns_and_expectations_from_suite(suite)
expected_expectations = {
"expect_table_columns_to_match_ordered_list",
"expect_table_row_count_to_be_between",
}
assert columns_with_expectations == set()
assert expectations_from_suite == expected_expectations | [
"def",
"test_notebook_execution_with_pandas_backend",
"(",
"titanic_data_context_no_data_docs_no_checkpoint_store",
",",
")",
":",
"# Since we'll run the notebook, we use a context with no data docs to avoid",
"# the renderer's default behavior of building and opening docs, which is not",
"# part of this test.",
"context",
"=",
"titanic_data_context_no_data_docs_no_checkpoint_store",
"root_dir",
"=",
"context",
".",
"root_directory",
"uncommitted_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
")",
"suite_name",
"=",
"\"my_suite\"",
"suite",
"=",
"context",
".",
"create_expectation_suite",
"(",
"suite_name",
")",
"csv_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"..\"",
",",
"\"data\"",
",",
"\"Titanic.csv\"",
")",
"batch_kwargs",
"=",
"{",
"\"datasource\"",
":",
"\"mydatasource\"",
",",
"\"path\"",
":",
"csv_path",
"}",
"# Sanity check test setup",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"suite_name",
"]",
"assert",
"context",
".",
"list_datasources",
"(",
")",
"==",
"[",
"{",
"\"module_name\"",
":",
"\"great_expectations.datasource\"",
",",
"\"class_name\"",
":",
"\"PandasDatasource\"",
",",
"\"data_asset_type\"",
":",
"{",
"\"module_name\"",
":",
"\"great_expectations.dataset\"",
",",
"\"class_name\"",
":",
"\"PandasDataset\"",
",",
"}",
",",
"\"batch_kwargs_generators\"",
":",
"{",
"\"mygenerator\"",
":",
"{",
"\"class_name\"",
":",
"\"SubdirReaderBatchKwargsGenerator\"",
",",
"\"base_directory\"",
":",
"\"../data\"",
",",
"}",
"}",
",",
"\"name\"",
":",
"\"mydatasource\"",
",",
"}",
"]",
"assert",
"context",
".",
"get_validation_result",
"(",
"suite_name",
")",
"==",
"{",
"}",
"notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"uncommitted_dir",
",",
"f\"{suite_name}.ipynb\"",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"notebook_path",
")",
"# Create notebook",
"renderer",
"=",
"SuiteScaffoldNotebookRenderer",
"(",
"titanic_data_context_no_data_docs_no_checkpoint_store",
",",
"suite",
",",
"batch_kwargs",
")",
"renderer",
".",
"render_to_disk",
"(",
"notebook_path",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"notebook_path",
")",
"with",
"open",
"(",
"notebook_path",
")",
"as",
"f",
":",
"nb",
"=",
"nbformat",
".",
"read",
"(",
"f",
",",
"as_version",
"=",
"4",
")",
"# Run notebook",
"ep",
"=",
"ExecutePreprocessor",
"(",
"timeout",
"=",
"600",
",",
"kernel_name",
"=",
"\"python3\"",
")",
"ep",
".",
"preprocess",
"(",
"nb",
",",
"{",
"\"metadata\"",
":",
"{",
"\"path\"",
":",
"uncommitted_dir",
"}",
"}",
")",
"# Useful to inspect executed notebook",
"output_notebook",
"=",
"os",
".",
"path",
".",
"join",
"(",
"uncommitted_dir",
",",
"\"output.ipynb\"",
")",
"with",
"open",
"(",
"output_notebook",
",",
"\"w\"",
")",
"as",
"f",
":",
"nbformat",
".",
"write",
"(",
"nb",
",",
"f",
")",
"# Assertions about output",
"context",
"=",
"DataContext",
"(",
"root_dir",
")",
"obs_validation_result",
"=",
"context",
".",
"get_validation_result",
"(",
"suite_name",
")",
"assert",
"obs_validation_result",
".",
"statistics",
"==",
"{",
"\"evaluated_expectations\"",
":",
"2",
",",
"\"successful_expectations\"",
":",
"2",
",",
"\"unsuccessful_expectations\"",
":",
"0",
",",
"\"success_percent\"",
":",
"100",
",",
"}",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"suite_name",
")",
"assert",
"suite",
".",
"expectations",
"(",
"columns_with_expectations",
",",
"expectations_from_suite",
",",
")",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"suite",
")",
"expected_expectations",
"=",
"{",
"\"expect_table_columns_to_match_ordered_list\"",
",",
"\"expect_table_row_count_to_be_between\"",
",",
"}",
"assert",
"columns_with_expectations",
"==",
"set",
"(",
")",
"assert",
"expectations_from_suite",
"==",
"expected_expectations"
] | [
12,
0
] | [
104,
59
] | python | en | ['en', 'error', 'th'] | False |
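Stripped of the Great Expectations specifics, the notebook-execution recipe used by this test looks roughly like the sketch below; the file paths are placeholders and a local `python3` Jupyter kernel is assumed:

```python
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

notebook_path = "my_suite.ipynb"  # placeholder input notebook
output_path = "output.ipynb"      # placeholder for the executed copy

# Load the notebook in the v4 format.
with open(notebook_path) as f:
    nb = nbformat.read(f, as_version=4)

# Execute every cell; a failing cell raises CellExecutionError.
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": "."}})

# Persist the executed notebook so its outputs can be inspected.
with open(output_path, "w") as f:
    nbformat.write(nb, f)
```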
adjustData | (img,mask) |
Preprocess an image/mask pair: scale the image to [0, 1], binarize the
mask at 0.5, and one-hot encode the mask into two classes.
:param img: input image array
:param mask: segmentation mask array
:return: the adjusted (img, mask) pair
|
Preprocess an image/mask pair: scale the image to [0, 1], binarize the
mask at 0.5, and one-hot encode the mask into two classes.
:param img: input image array
:param mask: segmentation mask array
:return: the adjusted (img, mask) pair
| def adjustData(img,mask):
"""
Preprocess an image/mask pair: scale the image to [0, 1], binarize the
mask at 0.5, and one-hot encode the mask into two classes.
:param img: input image array
:param mask: segmentation mask array
:return: the adjusted (img, mask) pair
"""
if np.max(img) > 1:
img = img / 255
mask = mask / 255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
mask = to_categorical(mask, num_classes=2)
return img,mask | [
"def",
"adjustData",
"(",
"img",
",",
"mask",
")",
":",
"if",
"np",
".",
"max",
"(",
"img",
")",
">",
"1",
":",
"img",
"=",
"img",
"/",
"255",
"mask",
"=",
"mask",
"/",
"255",
"mask",
"[",
"mask",
">",
"0.5",
"]",
"=",
"1",
"mask",
"[",
"mask",
"<=",
"0.5",
"]",
"=",
"0",
"mask",
"=",
"to_categorical",
"(",
"mask",
",",
"num_classes",
"=",
"2",
")",
"return",
"img",
",",
"mask"
] | [
13,
0
] | [
26,
19
] | python | en | ['en', 'error', 'th'] | False |
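To make the preprocessing concrete, here is a stand-alone sketch with toy arrays. It mirrors `adjustData`, using `np.eye` in place of Keras's `to_categorical` (assumed equivalent for a two-class mask):

```python
import numpy as np

# Toy 2x2 image and mask in the 0-255 range.
img = np.array([[0, 128], [200, 255]], dtype=float)
mask = np.array([[0, 30], [200, 255]], dtype=float)

if np.max(img) > 1:          # only rescale when the data is still 0-255
    img = img / 255
    mask = mask / 255
    mask[mask > 0.5] = 1     # binarize the mask at 0.5
    mask[mask <= 0.5] = 0

# One-hot encode the binary mask into two channels (background, foreground);
# to_categorical(mask, num_classes=2) produces the same (H, W, 2) result.
one_hot = np.eye(2)[mask.astype(int)]

print(img.round(2))      # values now in [0, 1]
print(one_hot.shape)     # (2, 2, 2)
```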
library_install_load_check | (
python_import_name: str, pip_library_name: str
) |
Dynamically load a module from strings, attempt a pip install or raise a helpful error.
:return: None if the library is already loadable; otherwise the exit status of the attempted pip install (0 on success, 1 on failure)
Args:
pip_library_name: name of the library to load
python_import_name (str): a module to import to verify installation
|
Dynamically load a module from strings, attempt a pip install or raise a helpful error. | def library_install_load_check(
python_import_name: str, pip_library_name: str
) -> Optional[int]:
"""
Dynamically load a module from strings, attempt a pip install or raise a helpful error.
:return: None if the library is already loadable; otherwise the exit status of the attempted pip install (0 on success, 1 on failure)
Args:
pip_library_name: name of the library to load
python_import_name (str): a module to import to verify installation
"""
if is_library_loadable(library_name=python_import_name):
return None
confirm_prompt: str = f"""Great Expectations relies on the library `{python_import_name}` to connect to your data, \
but the package `{pip_library_name}` containing this library is not installed.
Would you like Great Expectations to try to execute `pip install {pip_library_name}` for you?"""
continuation_message: str = f"""\nOK, exiting now.
- Please execute `pip install {pip_library_name}` before trying again."""
pip_install_confirmed = toolkit.confirm_proceed_or_exit(
confirm_prompt=confirm_prompt,
continuation_message=continuation_message,
exit_on_no=True,
exit_code=1,
)
if not pip_install_confirmed:
cli_message(continuation_message)
sys.exit(1)
status_code: int = execute_shell_command_with_progress_polling(
f"pip install {pip_library_name}"
)
# project_distribution: Distribution = get_project_distribution()
# if project_distribution:
# project_name: str = project_distribution.metadata['Name']
# version: str = project_distribution.metadata['Version']
#
# pkg_resources.working_set = pkg_resources.WorkingSet._build_master()
working_set: WorkingSet = pkg_resources.working_set
# noinspection SpellCheckingInspection
distr: Distribution = pkg_resources.get_distribution(dist=pip_library_name)
pkg_resources.WorkingSet.add_entry(self=working_set, entry=distr.key)
library_loadable: bool = is_library_loadable(library_name=python_import_name)
if status_code == 0 and library_loadable:
return 0
if not library_loadable:
cli_message(
f"""<red>ERROR: Great Expectations relies on the library `{pip_library_name}` to connect to your data.</red>
- Please execute `pip install {pip_library_name}` before trying again."""
)
return 1
return status_code | [
"def",
"library_install_load_check",
"(",
"python_import_name",
":",
"str",
",",
"pip_library_name",
":",
"str",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"if",
"is_library_loadable",
"(",
"library_name",
"=",
"python_import_name",
")",
":",
"return",
"None",
"confirm_prompt",
":",
"str",
"=",
"f\"\"\"Great Expectations relies on the library `{python_import_name}` to connect to your data, \\\nbut the package `{pip_library_name}` containing this library is not installed.\n Would you like Great Expectations to try to execute `pip install {pip_library_name}` for you?\"\"\"",
"continuation_message",
":",
"str",
"=",
"f\"\"\"\\nOK, exiting now.\n - Please execute `pip install {pip_library_name}` before trying again.\"\"\"",
"pip_install_confirmed",
"=",
"toolkit",
".",
"confirm_proceed_or_exit",
"(",
"confirm_prompt",
"=",
"confirm_prompt",
",",
"continuation_message",
"=",
"continuation_message",
",",
"exit_on_no",
"=",
"True",
",",
"exit_code",
"=",
"1",
",",
")",
"if",
"not",
"pip_install_confirmed",
":",
"cli_message",
"(",
"continuation_message",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"status_code",
":",
"int",
"=",
"execute_shell_command_with_progress_polling",
"(",
"f\"pip install {pip_library_name}\"",
")",
"# project_distribution: Distribution = get_project_distribution()",
"# if project_distribution:",
"# project_name: str = project_distribution.metadata['Name']",
"# version: str = project_distribution.metadata['Version']",
"#",
"# pkg_resources.working_set = pkg_resources.WorkingSet._build_master()",
"working_set",
":",
"WorkingSet",
"=",
"pkg_resources",
".",
"working_set",
"# noinspection SpellCheckingInspection",
"distr",
":",
"Distribution",
"=",
"pkg_resources",
".",
"get_distribution",
"(",
"dist",
"=",
"pip_library_name",
")",
"pkg_resources",
".",
"WorkingSet",
".",
"add_entry",
"(",
"self",
"=",
"working_set",
",",
"entry",
"=",
"distr",
".",
"key",
")",
"library_loadable",
":",
"bool",
"=",
"is_library_loadable",
"(",
"library_name",
"=",
"python_import_name",
")",
"if",
"status_code",
"==",
"0",
"and",
"library_loadable",
":",
"return",
"0",
"if",
"not",
"library_loadable",
":",
"cli_message",
"(",
"f\"\"\"<red>ERROR: Great Expectations relies on the library `{pip_library_name}` to connect to your data.</red>\n - Please execute `pip install {pip_library_name}` before trying again.\"\"\"",
")",
"return",
"1",
"return",
"status_code"
] | [
43,
0
] | [
102,
22
] | python | en | ['en', 'error', 'th'] | False |
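The loadability check that gates this function can be sketched with `importlib`; this is an illustrative stand-in, not the actual `is_library_loadable` helper from the codebase:

```python
import importlib.util


def is_library_loadable(library_name: str) -> bool:
    """Return True if the named module can be imported in the current environment."""
    return importlib.util.find_spec(library_name) is not None


# Decide whether a `pip install` prompt would be needed for a given driver.
if not is_library_loadable("pyodbc"):
    print("pyodbc is not importable; pip install pyodbc would be suggested here.")
```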
FAISSDocumentStore.__init__ | (
self,
sql_url: str = "sqlite:///",
vector_dim: int = 768,
faiss_index_factory_str: str = "Flat",
faiss_index: Optional[faiss.swigfaiss.Index] = None,
return_embedding: bool = False,
update_existing_documents: bool = False,
index: str = "document",
similarity: str = "dot_product",
embedding_field: str = "embedding",
progress_bar: bool = True,
**kwargs,
) |
:param sql_url: SQL connection URL for database. It defaults to local file based SQLite DB. For large scale
deployment, Postgres is recommended.
:param vector_dim: the embedding vector size.
:param faiss_index_factory_str: Create a new FAISS index of the specified type.
The type is determined from the given string following the conventions
of the original FAISS index factory.
Recommended options:
- "Flat" (default): Best accuracy (= exact). Becomes slow and RAM intense for > 1 Mio docs.
- "HNSW": Graph-based heuristic. If not further specified,
we use a RAM intense, but more accurate config:
HNSW256, efConstruction=256 and efSearch=256
- "IVFx,Flat": Inverted Index. Replace x with the number of centroids aka nlist.
Rule of thumb: nlist = 10 * sqrt (num_docs) is a good starting point.
For more details see:
- Overview of indices https://github.com/facebookresearch/faiss/wiki/Faiss-indexes
- Guideline for choosing an index https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
- FAISS Index factory https://github.com/facebookresearch/faiss/wiki/The-index-factory
Benchmarks: XXX
:param faiss_index: Pass an existing FAISS Index, i.e. an empty one that you configured manually
or one with docs that you used in Haystack before and want to load again.
:param return_embedding: To return document embedding
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists.
:param index: Name of index in document store to use.
:param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
:param embedding_field: Name of field containing an embedding vector.
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
|
:param sql_url: SQL connection URL for database. It defaults to local file based SQLite DB. For large scale
deployment, Postgres is recommended.
:param vector_dim: the embedding vector size.
:param faiss_index_factory_str: Create a new FAISS index of the specified type.
The type is determined from the given string following the conventions
of the original FAISS index factory.
Recommended options:
- "Flat" (default): Best accuracy (= exact). Becomes slow and RAM intense for > 1 Mio docs.
- "HNSW": Graph-based heuristic. If not further specified,
we use a RAM intense, but more accurate config:
HNSW256, efConstruction=256 and efSearch=256
- "IVFx,Flat": Inverted Index. Replace x with the number of centroids aka nlist.
Rule of thumb: nlist = 10 * sqrt (num_docs) is a good starting point.
For more details see:
- Overview of indices https://github.com/facebookresearch/faiss/wiki/Faiss-indexes
- Guideline for choosing an index https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
- FAISS Index factory https://github.com/facebookresearch/faiss/wiki/The-index-factory
Benchmarks: XXX
:param faiss_index: Pass an existing FAISS Index, i.e. an empty one that you configured manually
or one with docs that you used in Haystack before and want to load again.
:param return_embedding: To return document embedding
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists.
:param index: Name of index in document store to use.
:param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
:param embedding_field: Name of field containing an embedding vector.
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
| def __init__(
self,
sql_url: str = "sqlite:///",
vector_dim: int = 768,
faiss_index_factory_str: str = "Flat",
faiss_index: Optional[faiss.swigfaiss.Index] = None,
return_embedding: bool = False,
update_existing_documents: bool = False,
index: str = "document",
similarity: str = "dot_product",
embedding_field: str = "embedding",
progress_bar: bool = True,
**kwargs,
):
"""
:param sql_url: SQL connection URL for database. It defaults to local file based SQLite DB. For large scale
deployment, Postgres is recommended.
:param vector_dim: the embedding vector size.
:param faiss_index_factory_str: Create a new FAISS index of the specified type.
The type is determined from the given string following the conventions
of the original FAISS index factory.
Recommended options:
- "Flat" (default): Best accuracy (= exact). Becomes slow and RAM intense for > 1 Mio docs.
- "HNSW": Graph-based heuristic. If not further specified,
we use a RAM intense, but more accurate config:
HNSW256, efConstruction=256 and efSearch=256
- "IVFx,Flat": Inverted Index. Replace x with the number of centroids aka nlist.
Rule of thumb: nlist = 10 * sqrt (num_docs) is a good starting point.
For more details see:
- Overview of indices https://github.com/facebookresearch/faiss/wiki/Faiss-indexes
- Guideline for choosing an index https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
- FAISS Index factory https://github.com/facebookresearch/faiss/wiki/The-index-factory
Benchmarks: XXX
:param faiss_index: Pass an existing FAISS Index, i.e. an empty one that you configured manually
or one with docs that you used in Haystack before and want to load again.
:param return_embedding: To return document embedding
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists.
:param index: Name of index in document store to use.
:param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
:param embedding_field: Name of field containing an embedding vector.
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
"""
self.vector_dim = vector_dim
self.faiss_index_factory_str = faiss_index_factory_str
self.faiss_indexes: Dict[str, faiss.swigfaiss.Index] = {}
if faiss_index:
self.faiss_indexes[index] = faiss_index
else:
self.faiss_indexes[index] = self._create_new_index(
vector_dim=self.vector_dim, index_factory=faiss_index_factory_str, **kwargs
)
self.return_embedding = return_embedding
self.embedding_field = embedding_field
if similarity == "dot_product":
self.similarity = similarity
else:
raise ValueError("The FAISS document store can currently only support dot_product similarity. "
"Please set similarity=\"dot_product\"")
self.progress_bar = progress_bar
super().__init__(
url=sql_url,
update_existing_documents=update_existing_documents,
index=index
) | [
"def",
"__init__",
"(",
"self",
",",
"sql_url",
":",
"str",
"=",
"\"sqlite:///\"",
",",
"vector_dim",
":",
"int",
"=",
"768",
",",
"faiss_index_factory_str",
":",
"str",
"=",
"\"Flat\"",
",",
"faiss_index",
":",
"Optional",
"[",
"faiss",
".",
"swigfaiss",
".",
"Index",
"]",
"=",
"None",
",",
"return_embedding",
":",
"bool",
"=",
"False",
",",
"update_existing_documents",
":",
"bool",
"=",
"False",
",",
"index",
":",
"str",
"=",
"\"document\"",
",",
"similarity",
":",
"str",
"=",
"\"dot_product\"",
",",
"embedding_field",
":",
"str",
"=",
"\"embedding\"",
",",
"progress_bar",
":",
"bool",
"=",
"True",
",",
"*",
"*",
"kwargs",
",",
")",
":",
"self",
".",
"vector_dim",
"=",
"vector_dim",
"self",
".",
"faiss_index_factory_str",
"=",
"faiss_index_factory_str",
"self",
".",
"faiss_indexes",
":",
"Dict",
"[",
"str",
",",
"faiss",
".",
"swigfaiss",
".",
"Index",
"]",
"=",
"{",
"}",
"if",
"faiss_index",
":",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
"=",
"faiss_index",
"else",
":",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
"=",
"self",
".",
"_create_new_index",
"(",
"vector_dim",
"=",
"self",
".",
"vector_dim",
",",
"index_factory",
"=",
"faiss_index_factory_str",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"return_embedding",
"=",
"return_embedding",
"self",
".",
"embedding_field",
"=",
"embedding_field",
"if",
"similarity",
"==",
"\"dot_product\"",
":",
"self",
".",
"similarity",
"=",
"similarity",
"else",
":",
"raise",
"ValueError",
"(",
"\"The FAISS document store can currently only support dot_product similarity. \"",
"\"Please set similarity=\\\"dot_product\\\"\"",
")",
"self",
".",
"progress_bar",
"=",
"progress_bar",
"super",
"(",
")",
".",
"__init__",
"(",
"url",
"=",
"sql_url",
",",
"update_existing_documents",
"=",
"update_existing_documents",
",",
"index",
"=",
"index",
")"
] | [
29,
4
] | [
99,
9
] | python | en | ['en', 'error', 'th'] | False |
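For readers skimming these rows, a minimal construction sketch for the store whose `__init__` appears above. The import path is an assumption (it has moved between Haystack releases), and the SQLite URL is a hypothetical example.

from haystack.document_store.faiss import FAISSDocumentStore  # assumed path; varies by Haystack version

document_store = FAISSDocumentStore(
    sql_url="sqlite:///faiss_document_store.db",  # hypothetical SQL backend for docs and metadata
    vector_dim=768,                               # must match the embedding model's output size
    faiss_index_factory_str="Flat",               # exact search; "HNSW" or "IVFx,Flat" trade accuracy for speed/RAM
    similarity="dot_product",                     # the only similarity this store accepts
    return_embedding=False,
)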
FAISSDocumentStore.write_documents | (
self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000
) |
Add new documents to the DocumentStore.
:param documents: List of `Dicts` or List of `Documents`. If they already contain the embeddings, we'll index
them right away in FAISS. If not, you can later call update_embeddings() to create & index them.
:param index: (SQL) index name for storing the docs and metadata
:param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
:return:
|
Add new documents to the DocumentStore. | def write_documents(
self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000
):
"""
Add new documents to the DocumentStore.
:param documents: List of `Dicts` or List of `Documents`. If they already contain the embeddings, we'll index
them right away in FAISS. If not, you can later call update_embeddings() to create & index them.
:param index: (SQL) index name for storing the docs and metadata
:param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
:return:
"""
index = index or self.index
if not self.faiss_indexes.get(index):
self.faiss_indexes[index] = self._create_new_index(
vector_dim=self.vector_dim, index_factory=self.faiss_index_factory_str
)
field_map = self._create_document_field_map()
document_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in documents]
add_vectors = False if document_objects[0].embedding is None else True
if self.update_existing_documents and add_vectors:
logger.warning("You have enabled `update_existing_documents` feature and "
"`FAISSDocumentStore` does not support update in existing `faiss_index`.\n"
"Please call `update_embeddings` method to repopulate `faiss_index`")
vector_id = self.faiss_indexes[index].ntotal
for i in range(0, len(document_objects), batch_size):
if add_vectors:
embeddings = [doc.embedding for doc in document_objects[i: i + batch_size]]
embeddings_to_index = np.array(embeddings, dtype="float32")
self.faiss_indexes[index].add(embeddings_to_index)
docs_to_write_in_sql = []
for doc in document_objects[i: i + batch_size]:
meta = doc.meta
if add_vectors:
meta["vector_id"] = vector_id
vector_id += 1
docs_to_write_in_sql.append(doc)
super(FAISSDocumentStore, self).write_documents(docs_to_write_in_sql, index=index) | [
"def",
"write_documents",
"(",
"self",
",",
"documents",
":",
"Union",
"[",
"List",
"[",
"dict",
"]",
",",
"List",
"[",
"Document",
"]",
"]",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"if",
"not",
"self",
".",
"faiss_indexes",
".",
"get",
"(",
"index",
")",
":",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
"=",
"self",
".",
"_create_new_index",
"(",
"vector_dim",
"=",
"self",
".",
"vector_dim",
",",
"index_factory",
"=",
"self",
".",
"faiss_index_factory_str",
")",
"field_map",
"=",
"self",
".",
"_create_document_field_map",
"(",
")",
"document_objects",
"=",
"[",
"Document",
".",
"from_dict",
"(",
"d",
",",
"field_map",
"=",
"field_map",
")",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
"else",
"d",
"for",
"d",
"in",
"documents",
"]",
"add_vectors",
"=",
"False",
"if",
"document_objects",
"[",
"0",
"]",
".",
"embedding",
"is",
"None",
"else",
"True",
"if",
"self",
".",
"update_existing_documents",
"and",
"add_vectors",
":",
"logger",
".",
"warning",
"(",
"\"You have enabled `update_existing_documents` feature and \"",
"\"`FAISSDocumentStore` does not support update in existing `faiss_index`.\\n\"",
"\"Please call `update_embeddings` method to repopulate `faiss_index`\"",
")",
"vector_id",
"=",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"ntotal",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"document_objects",
")",
",",
"batch_size",
")",
":",
"if",
"add_vectors",
":",
"embeddings",
"=",
"[",
"doc",
".",
"embedding",
"for",
"doc",
"in",
"document_objects",
"[",
"i",
":",
"i",
"+",
"batch_size",
"]",
"]",
"embeddings_to_index",
"=",
"np",
".",
"array",
"(",
"embeddings",
",",
"dtype",
"=",
"\"float32\"",
")",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"add",
"(",
"embeddings_to_index",
")",
"docs_to_write_in_sql",
"=",
"[",
"]",
"for",
"doc",
"in",
"document_objects",
"[",
"i",
":",
"i",
"+",
"batch_size",
"]",
":",
"meta",
"=",
"doc",
".",
"meta",
"if",
"add_vectors",
":",
"meta",
"[",
"\"vector_id\"",
"]",
"=",
"vector_id",
"vector_id",
"+=",
"1",
"docs_to_write_in_sql",
".",
"append",
"(",
"doc",
")",
"super",
"(",
"FAISSDocumentStore",
",",
"self",
")",
".",
"write_documents",
"(",
"docs_to_write_in_sql",
",",
"index",
"=",
"index",
")"
] | [
117,
4
] | [
161,
94
] | python | en | ['en', 'error', 'th'] | False |
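A usage sketch for the `write_documents()` row above. The sample documents are invented, and the "text" key is an assumption that depends on the Haystack version; without an "embedding" key nothing is added to FAISS until `update_embeddings()` runs.

docs = [
    {"text": "FAISS enables fast similarity search over dense vectors.",
     "meta": {"name": "faiss_intro"}},
    {"text": "This store keeps text and metadata in SQL and vectors in FAISS.",
     "meta": {"name": "store_layout"}},
]

# Writes the docs (and their metadata) to the SQL backend in batches of 10k.
document_store.write_documents(docs, index="document", batch_size=10_000)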
FAISSDocumentStore.update_embeddings | (
self,
retriever: BaseRetriever,
index: Optional[str] = None,
update_existing_embeddings: bool = True,
filters: Optional[Dict[str, List[str]]] = None,
batch_size: int = 10_000
) |
Updates the embeddings in the document store using the encoding model specified in the retriever.
This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config).
:param retriever: Retriever to use to get embeddings for text
:param index: Index name for which embeddings are to be updated. If set to None, the default self.index is used.
:param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False,
only documents without embeddings are processed. This mode can be used for
incremental updating of embeddings, wherein only newly indexed documents
get processed.
:param filters: Optional filters to narrow down the documents for which embeddings are to be updated.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
:return: None
|
Updates the embeddings in the document store using the encoding model specified in the retriever.
This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config). | def update_embeddings(
self,
retriever: BaseRetriever,
index: Optional[str] = None,
update_existing_embeddings: bool = True,
filters: Optional[Dict[str, List[str]]] = None,
batch_size: int = 10_000
):
"""
Updates the embeddings in the document store using the encoding model specified in the retriever.
This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config).
:param retriever: Retriever to use to get embeddings for text
:param index: Index name for which embeddings are to be updated. If set to None, the default self.index is used.
:param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False,
only documents without embeddings are processed. This mode can be used for
incremental updating of embeddings, wherein only newly indexed documents
get processed.
:param filters: Optional filters to narrow down the documents for which embeddings are to be updated.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
:return: None
"""
index = index or self.index
if not self.faiss_indexes.get(index):
raise ValueError("Couldn't find a FAISS index. Try to init the FAISSDocumentStore() again ...")
document_count = self.get_document_count(index=index)
if document_count == 0:
logger.warning("Calling DocumentStore.update_embeddings() on an empty index")
return
logger.info(f"Updating embeddings for {document_count} docs...")
vector_id = self.faiss_indexes[index].ntotal
result = self._query(
index=index,
vector_ids=None,
batch_size=batch_size,
filters=filters,
only_documents_without_embedding=not update_existing_embeddings
)
batched_documents = get_batches_from_generator(result, batch_size)
with tqdm(total=document_count, disable=not self.progress_bar) as progress_bar:
for document_batch in batched_documents:
embeddings = retriever.embed_passages(document_batch) # type: ignore
assert len(document_batch) == len(embeddings)
embeddings_to_index = np.array(embeddings, dtype="float32")
self.faiss_indexes[index].add(embeddings_to_index)
vector_id_map = {}
for doc in document_batch:
vector_id_map[doc.id] = vector_id
vector_id += 1
self.update_vector_ids(vector_id_map, index=index)
progress_bar.update(batch_size)
progress_bar.close() | [
"def",
"update_embeddings",
"(",
"self",
",",
"retriever",
":",
"BaseRetriever",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"update_existing_embeddings",
":",
"bool",
"=",
"True",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"if",
"not",
"self",
".",
"faiss_indexes",
".",
"get",
"(",
"index",
")",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find a FAISS index. Try to init the FAISSDocumentStore() again ...\"",
")",
"document_count",
"=",
"self",
".",
"get_document_count",
"(",
"index",
"=",
"index",
")",
"if",
"document_count",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Calling DocumentStore.update_embeddings() on an empty index\"",
")",
"return",
"logger",
".",
"info",
"(",
"f\"Updating embeddings for {document_count} docs...\"",
")",
"vector_id",
"=",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"ntotal",
"result",
"=",
"self",
".",
"_query",
"(",
"index",
"=",
"index",
",",
"vector_ids",
"=",
"None",
",",
"batch_size",
"=",
"batch_size",
",",
"filters",
"=",
"filters",
",",
"only_documents_without_embedding",
"=",
"not",
"update_existing_embeddings",
")",
"batched_documents",
"=",
"get_batches_from_generator",
"(",
"result",
",",
"batch_size",
")",
"with",
"tqdm",
"(",
"total",
"=",
"document_count",
",",
"disable",
"=",
"not",
"self",
".",
"progress_bar",
")",
"as",
"progress_bar",
":",
"for",
"document_batch",
"in",
"batched_documents",
":",
"embeddings",
"=",
"retriever",
".",
"embed_passages",
"(",
"document_batch",
")",
"# type: ignore",
"assert",
"len",
"(",
"document_batch",
")",
"==",
"len",
"(",
"embeddings",
")",
"embeddings_to_index",
"=",
"np",
".",
"array",
"(",
"embeddings",
",",
"dtype",
"=",
"\"float32\"",
")",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"add",
"(",
"embeddings_to_index",
")",
"vector_id_map",
"=",
"{",
"}",
"for",
"doc",
"in",
"document_batch",
":",
"vector_id_map",
"[",
"doc",
".",
"id",
"]",
"=",
"vector_id",
"vector_id",
"+=",
"1",
"self",
".",
"update_vector_ids",
"(",
"vector_id_map",
",",
"index",
"=",
"index",
")",
"progress_bar",
".",
"update",
"(",
"batch_size",
")",
"progress_bar",
".",
"close",
"(",
")"
] | [
168,
4
] | [
226,
28
] | python | en | ['en', 'error', 'th'] | False |
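A sketch of `update_embeddings()` in use. The retriever class, its import path and the model names are assumptions; any Haystack retriever exposing `embed_passages()` should fit here.

from haystack.retriever.dense import DensePassageRetriever  # assumed path; varies by version

retriever = DensePassageRetriever(
    document_store=document_store,
    query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
    passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
)

# Embeds every stored document (or only those without vectors) batch by batch
# and appends the resulting float32 matrix to the FAISS index.
document_store.update_embeddings(
    retriever,
    index="document",
    update_existing_embeddings=True,
    batch_size=10_000,
)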
FAISSDocumentStore.get_all_documents_generator | (
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) |
Get all documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
|
Get all documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory. | def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get all documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
"""
index = index or self.index
documents = super(FAISSDocumentStore, self).get_all_documents_generator(
index=index, filters=filters, batch_size=batch_size, return_embedding=False,
)
if return_embedding is None:
return_embedding = self.return_embedding
for doc in documents:
if return_embedding:
if doc.meta and doc.meta.get("vector_id") is not None:
doc.embedding = self.faiss_indexes[index].reconstruct(int(doc.meta["vector_id"]))
yield doc | [
"def",
"get_all_documents_generator",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"return_embedding",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"10_000",
",",
")",
"->",
"Generator",
"[",
"Document",
",",
"None",
",",
"None",
"]",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"documents",
"=",
"super",
"(",
"FAISSDocumentStore",
",",
"self",
")",
".",
"get_all_documents_generator",
"(",
"index",
"=",
"index",
",",
"filters",
"=",
"filters",
",",
"batch_size",
"=",
"batch_size",
",",
"return_embedding",
"=",
"False",
",",
")",
"if",
"return_embedding",
"is",
"None",
":",
"return_embedding",
"=",
"self",
".",
"return_embedding",
"for",
"doc",
"in",
"documents",
":",
"if",
"return_embedding",
":",
"if",
"doc",
".",
"meta",
"and",
"doc",
".",
"meta",
".",
"get",
"(",
"\"vector_id\"",
")",
"is",
"not",
"None",
":",
"doc",
".",
"embedding",
"=",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"reconstruct",
"(",
"int",
"(",
"doc",
".",
"meta",
"[",
"\"vector_id\"",
"]",
")",
")",
"yield",
"doc"
] | [
241,
4
] | [
271,
21
] | python | en | ['en', 'error', 'th'] | False |
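An iteration sketch for the generator above; with `return_embedding=True` each vector is reconstructed from FAISS via the document's stored `vector_id` rather than loaded from SQL.

for doc in document_store.get_all_documents_generator(
    index="document", return_embedding=True, batch_size=10_000
):
    # Only documents that went through update_embeddings() carry a vector_id.
    vector_id = doc.meta.get("vector_id") if doc.meta else None
    shape = None if doc.embedding is None else doc.embedding.shape
    print(doc.id, vector_id, shape)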
FAISSDocumentStore.train_index | (
self,
documents: Optional[Union[List[dict], List[Document]]],
embeddings: Optional[np.ndarray] = None,
index: Optional[str] = None,
) |
Some FAISS indices (e.g. IVF) require initial "training" on a sample of vectors before you can add your final vectors.
The train vectors should come from the same distribution as your final ones.
You can pass either documents (incl. embeddings) or just the plain embeddings that the index shall be trained on.
:param documents: Documents (incl. the embeddings)
:param embeddings: Plain embeddings
:param index: Name of the index to train. If None, the DocumentStore's default index (self.index) will be used.
:return: None
|
Some FAISS indices (e.g. IVF) require initial "training" on a sample of vectors before you can add your final vectors.
The train vectors should come from the same distribution as your final ones.
You can pass either documents (incl. embeddings) or just the plain embeddings that the index shall be trained on. | def train_index(
self,
documents: Optional[Union[List[dict], List[Document]]],
embeddings: Optional[np.ndarray] = None,
index: Optional[str] = None,
):
"""
Some FAISS indices (e.g. IVF) require initial "training" on a sample of vectors before you can add your final vectors.
The train vectors should come from the same distribution as your final ones.
You can pass either documents (incl. embeddings) or just the plain embeddings that the index shall be trained on.
:param documents: Documents (incl. the embeddings)
:param embeddings: Plain embeddings
:param index: Name of the index to train. If None, the DocumentStore's default index (self.index) will be used.
:return: None
"""
index = index or self.index
if embeddings and documents:
raise ValueError("Either pass `documents` or `embeddings`. You passed both.")
if documents:
document_objects = [Document.from_dict(d) if isinstance(d, dict) else d for d in documents]
doc_embeddings = [doc.embedding for doc in document_objects]
embeddings_for_train = np.array(doc_embeddings, dtype="float32")
self.faiss_indexes[index].train(embeddings_for_train)
if embeddings:
self.faiss_indexes[index].train(embeddings) | [
"def",
"train_index",
"(",
"self",
",",
"documents",
":",
"Optional",
"[",
"Union",
"[",
"List",
"[",
"dict",
"]",
",",
"List",
"[",
"Document",
"]",
"]",
"]",
",",
"embeddings",
":",
"Optional",
"[",
"np",
".",
"ndarray",
"]",
"=",
"None",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"if",
"embeddings",
"and",
"documents",
":",
"raise",
"ValueError",
"(",
"\"Either pass `documents` or `embeddings`. You passed both.\"",
")",
"if",
"documents",
":",
"document_objects",
"=",
"[",
"Document",
".",
"from_dict",
"(",
"d",
")",
"if",
"isinstance",
"(",
"d",
",",
"dict",
")",
"else",
"d",
"for",
"d",
"in",
"documents",
"]",
"doc_embeddings",
"=",
"[",
"doc",
".",
"embedding",
"for",
"doc",
"in",
"document_objects",
"]",
"embeddings_for_train",
"=",
"np",
".",
"array",
"(",
"doc_embeddings",
",",
"dtype",
"=",
"\"float32\"",
")",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"train",
"(",
"embeddings_for_train",
")",
"if",
"embeddings",
":",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"train",
"(",
"embeddings",
")"
] | [
284,
4
] | [
310,
55
] | python | en | ['en', 'error', 'th'] | False |
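A training sketch for an IVF index, which must be trained before vectors can be added. The random sample and the "text"/"embedding" dict keys are illustrative assumptions about the Document schema; real training data should come from the same distribution as the final embeddings.

import numpy as np

ivf_store = FAISSDocumentStore(faiss_index_factory_str="IVF256,Flat", vector_dim=768)

# Stand-in training vectors; in practice these would be real passage embeddings.
training_docs = [
    {"text": f"sample {i}", "embedding": np.random.rand(768).astype("float32")}
    for i in range(10_000)
]
ivf_store.train_index(documents=training_docs)
# Only after training can write_documents()/update_embeddings() add the real vectors.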
FAISSDocumentStore.delete_all_documents | (self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) |
Delete all documents from the document store.
|
Delete all documents from the document store.
| def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None):
"""
Delete all documents from the document store.
"""
if filters:
raise Exception("filters are supported for deleting documents in FAISSDocumentStore.")
index = index or self.index
if index in self.faiss_indexes.keys():
self.faiss_indexes[index].reset()
super().delete_all_documents(index=index) | [
"def",
"delete_all_documents",
"(",
"self",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
")",
":",
"if",
"filters",
":",
"raise",
"Exception",
"(",
"\"filters are supported for deleting documents in FAISSDocumentStore.\"",
")",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"if",
"index",
"in",
"self",
".",
"faiss_indexes",
".",
"keys",
"(",
")",
":",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"reset",
"(",
")",
"super",
"(",
")",
".",
"delete_all_documents",
"(",
"index",
"=",
"index",
")"
] | [
312,
4
] | [
321,
49
] | python | en | ['en', 'error', 'th'] | False |
FAISSDocumentStore.query_by_embedding | (
self,
query_emb: np.ndarray,
filters: Optional[Dict[str, List[str]]] = None,
top_k: int = 10,
index: Optional[str] = None,
return_embedding: Optional[bool] = None
) |
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
:param query_emb: Embedding of the query (e.g. gathered from DPR)
:param filters: Optional filters to narrow down the search space.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param top_k: How many documents to return
:param index: Index name to query the document from.
:param return_embedding: Whether to return the document embedding
:return:
|
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric. | def query_by_embedding(
self,
query_emb: np.ndarray,
filters: Optional[Dict[str, List[str]]] = None,
top_k: int = 10,
index: Optional[str] = None,
return_embedding: Optional[bool] = None
) -> List[Document]:
"""
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
:param query_emb: Embedding of the query (e.g. gathered from DPR)
:param filters: Optional filters to narrow down the search space.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param top_k: How many documents to return
:param index: Index name to query the document from.
:param return_embedding: Whether to return the document embedding
:return:
"""
if filters:
raise Exception("Query filters are not implemented for the FAISSDocumentStore.")
index = index or self.index
if not self.faiss_indexes.get(index):
raise Exception(f"Index named '{index}' does not exists. Use 'update_embeddings()' to create an index.")
if return_embedding is None:
return_embedding = self.return_embedding
query_emb = query_emb.reshape(1, -1).astype(np.float32)
score_matrix, vector_id_matrix = self.faiss_indexes[index].search(query_emb, top_k)
vector_ids_for_query = [str(vector_id) for vector_id in vector_id_matrix[0] if vector_id != -1]
documents = self.get_documents_by_vector_ids(vector_ids_for_query, index=index)
#assign query score to each document
scores_for_vector_ids: Dict[str, float] = {str(v_id): s for v_id, s in zip(vector_id_matrix[0], score_matrix[0])}
for doc in documents:
doc.score = scores_for_vector_ids[doc.meta["vector_id"]]
doc.probability = float(expit(np.asarray(doc.score / 100)))
if return_embedding is True:
doc.embedding = self.faiss_indexes[index].reconstruct(int(doc.meta["vector_id"]))
return documents | [
"def",
"query_by_embedding",
"(",
"self",
",",
"query_emb",
":",
"np",
".",
"ndarray",
",",
"filters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"]",
"=",
"None",
",",
"top_k",
":",
"int",
"=",
"10",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"return_embedding",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"Document",
"]",
":",
"if",
"filters",
":",
"raise",
"Exception",
"(",
"\"Query filters are not implemented for the FAISSDocumentStore.\"",
")",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"if",
"not",
"self",
".",
"faiss_indexes",
".",
"get",
"(",
"index",
")",
":",
"raise",
"Exception",
"(",
"f\"Index named '{index}' does not exists. Use 'update_embeddings()' to create an index.\"",
")",
"if",
"return_embedding",
"is",
"None",
":",
"return_embedding",
"=",
"self",
".",
"return_embedding",
"query_emb",
"=",
"query_emb",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"score_matrix",
",",
"vector_id_matrix",
"=",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"search",
"(",
"query_emb",
",",
"top_k",
")",
"vector_ids_for_query",
"=",
"[",
"str",
"(",
"vector_id",
")",
"for",
"vector_id",
"in",
"vector_id_matrix",
"[",
"0",
"]",
"if",
"vector_id",
"!=",
"-",
"1",
"]",
"documents",
"=",
"self",
".",
"get_documents_by_vector_ids",
"(",
"vector_ids_for_query",
",",
"index",
"=",
"index",
")",
"#assign query score to each document",
"scores_for_vector_ids",
":",
"Dict",
"[",
"str",
",",
"float",
"]",
"=",
"{",
"str",
"(",
"v_id",
")",
":",
"s",
"for",
"v_id",
",",
"s",
"in",
"zip",
"(",
"vector_id_matrix",
"[",
"0",
"]",
",",
"score_matrix",
"[",
"0",
"]",
")",
"}",
"for",
"doc",
"in",
"documents",
":",
"doc",
".",
"score",
"=",
"scores_for_vector_ids",
"[",
"doc",
".",
"meta",
"[",
"\"vector_id\"",
"]",
"]",
"doc",
".",
"probability",
"=",
"float",
"(",
"expit",
"(",
"np",
".",
"asarray",
"(",
"doc",
".",
"score",
"/",
"100",
")",
")",
")",
"if",
"return_embedding",
"is",
"True",
":",
"doc",
".",
"embedding",
"=",
"self",
".",
"faiss_indexes",
"[",
"index",
"]",
".",
"reconstruct",
"(",
"int",
"(",
"doc",
".",
"meta",
"[",
"\"vector_id\"",
"]",
")",
")",
"return",
"documents"
] | [
323,
4
] | [
366,
24
] | python | en | ['en', 'error', 'th'] | False |
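A retrieval sketch for `query_by_embedding()`. In practice the query vector comes from the same retriever used for indexing; a random vector of the right dimensionality is used here only to illustrate the call shape.

import numpy as np

query_emb = np.random.rand(768).astype("float32")  # stand-in for retriever.embed_queries(...)[0]

results = document_store.query_by_embedding(query_emb, top_k=5, index="document")
for doc in results:
    # score is the raw FAISS inner product; probability squashes score/100 through a sigmoid.
    print(round(doc.probability, 3), doc.score, doc.meta.get("name"))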
FAISSDocumentStore.save | (self, file_path: Union[str, Path]) |
Save FAISS Index to the specified file.
:param file_path: Path to save to.
:return: None
|
Save FAISS Index to the specified file. | def save(self, file_path: Union[str, Path]):
"""
Save FAISS Index to the specified file.
:param file_path: Path to save to.
:return: None
"""
faiss.write_index(self.faiss_indexes[self.index], str(file_path)) | [
"def",
"save",
"(",
"self",
",",
"file_path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
")",
":",
"faiss",
".",
"write_index",
"(",
"self",
".",
"faiss_indexes",
"[",
"self",
".",
"index",
"]",
",",
"str",
"(",
"file_path",
")",
")"
] | [
368,
4
] | [
375,
73
] | python | en | ['en', 'error', 'th'] | False |
FAISSDocumentStore.load | (
cls,
faiss_file_path: Union[str, Path],
sql_url: str,
index: str,
) |
Load a saved FAISS index from a file and connect to the SQL database.
Note: In order to have a correct mapping from FAISS to SQL,
make sure to use the same SQL DB that you used when calling `save()`.
:param faiss_file_path: Stored FAISS index file. Can be created by calling `save()`
:param sql_url: Connection string to the SQL database that contains your docs and metadata.
:param index: Index name to load the FAISS index as. It must match the index name used
when creating the FAISS index.
:return:
|
Load a saved FAISS index from a file and connect to the SQL database.
Note: In order to have a correct mapping from FAISS to SQL,
make sure to use the same SQL DB that you used when calling `save()`. | def load(
cls,
faiss_file_path: Union[str, Path],
sql_url: str,
index: str,
):
"""
Load a saved FAISS index from a file and connect to the SQL database.
Note: In order to have a correct mapping from FAISS to SQL,
make sure to use the same SQL DB that you used when calling `save()`.
:param faiss_file_path: Stored FAISS index file. Can be created by calling `save()`
:param sql_url: Connection string to the SQL database that contains your docs and metadata.
:param index: Index name to load the FAISS index as. It must match the index name used
when creating the FAISS index.
:return:
"""
"""
"""
faiss_index = faiss.read_index(str(faiss_file_path))
return cls(
faiss_index=faiss_index,
sql_url=sql_url,
vector_dim=faiss_index.d,
index=index,
) | [
"def",
"load",
"(",
"cls",
",",
"faiss_file_path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"sql_url",
":",
"str",
",",
"index",
":",
"str",
",",
")",
":",
"\"\"\"\n \"\"\"",
"faiss_index",
"=",
"faiss",
".",
"read_index",
"(",
"str",
"(",
"faiss_file_path",
")",
")",
"return",
"cls",
"(",
"faiss_index",
"=",
"faiss_index",
",",
"sql_url",
"=",
"sql_url",
",",
"vector_dim",
"=",
"faiss_index",
".",
"d",
",",
"index",
"=",
"index",
",",
")"
] | [
378,
4
] | [
403,
9
] | python | en | ['en', 'error', 'th'] | False |
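A persistence round-trip sketch covering `save()` and `load()`. File and database paths are hypothetical; as the docstring stresses, the same SQL database must be reused so the stored `vector_id`s keep pointing at the right rows.

document_store.save("my_faiss_index.faiss")

# ... later, typically in a new process:
reloaded_store = FAISSDocumentStore.load(
    faiss_file_path="my_faiss_index.faiss",
    sql_url="sqlite:///faiss_document_store.db",  # must be the same DB used before save()
    index="document",                             # must match the index name used originally
)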
PublicUserApiTests.test_create_valid_user_seccess | (self) | Test creating user with valid payload is successful | Test creating user with valid payload is successful | def test_create_valid_user_seccess(self):
"""Test creating user with valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'Test name',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data) | [
"def",
"test_create_valid_user_seccess",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'email'",
":",
"'[email protected]'",
",",
"'password'",
":",
"'testpass'",
",",
"'name'",
":",
"'Test name'",
",",
"}",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"CREATE_USER_URL",
",",
"payload",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_201_CREATED",
")",
"user",
"=",
"get_user_model",
"(",
")",
".",
"objects",
".",
"get",
"(",
"*",
"*",
"res",
".",
"data",
")",
"self",
".",
"assertTrue",
"(",
"user",
".",
"check_password",
"(",
"payload",
"[",
"'password'",
"]",
")",
")",
"self",
".",
"assertNotIn",
"(",
"'password'",
",",
"res",
".",
"data",
")"
] | [
23,
4
] | [
35,
46
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_user_exists | (self) | Test creating a user that already exists fails | Test creating a user that already exists fails | def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'Test name',
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | [
"def",
"test_user_exists",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'email'",
":",
"'[email protected]'",
",",
"'password'",
":",
"'testpass'",
",",
"'name'",
":",
"'Test name'",
",",
"}",
"create_user",
"(",
"*",
"*",
"payload",
")",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"CREATE_USER_URL",
",",
"payload",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [
37,
4
] | [
48,
70
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_password_too_short | (self) | Test that the password must be more than 5 characters | Test that the password must be more than 5 characters | def test_password_too_short(self):
"""Test that the password must be more than 5 characters"""
payload = {
'email': '[email protected]',
'password': 'pw',
'name': 'Test name',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists) | [
"def",
"test_password_too_short",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'email'",
":",
"'[email protected]'",
",",
"'password'",
":",
"'pw'",
",",
"'name'",
":",
"'Test name'",
",",
"}",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"CREATE_USER_URL",
",",
"payload",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_400_BAD_REQUEST",
")",
"user_exists",
"=",
"get_user_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"email",
"=",
"payload",
"[",
"'email'",
"]",
")",
".",
"exists",
"(",
")",
"self",
".",
"assertFalse",
"(",
"user_exists",
")"
] | [
50,
4
] | [
63,
37
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_create_token_for_user | (self) | Test that a token is created for the user | Test that a token is created for the user | def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {
'email': '[email protected]',
'password': 'testpass',
}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK) | [
"def",
"test_create_token_for_user",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'email'",
":",
"'[email protected]'",
",",
"'password'",
":",
"'testpass'",
",",
"}",
"create_user",
"(",
"*",
"*",
"payload",
")",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"TOKEN_URL",
",",
"payload",
")",
"self",
".",
"assertIn",
"(",
"'token'",
",",
"res",
".",
"data",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_200_OK",
")"
] | [
65,
4
] | [
75,
61
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_create_token_invalid_credentials | (self) | Test that token is not created if invalid credentials are given | Test that token is not created if invalid credentials are given | def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='[email protected]', password='testpass')
payload = {
'email': '[email protected]',
'password': 'wrong',
}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | [
"def",
"test_create_token_invalid_credentials",
"(",
"self",
")",
":",
"create_user",
"(",
"email",
"=",
"'[email protected]'",
",",
"password",
"=",
"'testpass'",
")",
"payload",
"=",
"{",
"'email'",
":",
"'[email protected]'",
",",
"'password'",
":",
"'wrong'",
",",
"}",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"TOKEN_URL",
",",
"payload",
")",
"self",
".",
"assertNotIn",
"(",
"'token'",
",",
"res",
".",
"data",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [
77,
4
] | [
87,
70
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_create_token_no_user | (self) | Test that token is not created if user doesn't exist | Test that token is not created if user doesn't exist | def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exist"""
payload = {
'email': '[email protected]',
'password': 'wrong',
}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | [
"def",
"test_create_token_no_user",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'email'",
":",
"'[email protected]'",
",",
"'password'",
":",
"'wrong'",
",",
"}",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"TOKEN_URL",
",",
"payload",
")",
"self",
".",
"assertNotIn",
"(",
"'token'",
",",
"res",
".",
"data",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [
89,
4
] | [
98,
70
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_create_token_missing_field | (self) | Test that email and password are required | Test that email and password are required | def test_create_token_missing_field(self):
"""Test that email and password are required"""
payload = {
'email': 'test',
'password': '',
}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | [
"def",
"test_create_token_missing_field",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'email'",
":",
"'test'",
",",
"'password'",
":",
"''",
",",
"}",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"TOKEN_URL",
",",
"payload",
")",
"self",
".",
"assertNotIn",
"(",
"'token'",
",",
"res",
".",
"data",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_400_BAD_REQUEST",
")"
] | [
100,
4
] | [
109,
70
] | python | en | ['en', 'en', 'en'] | True |
PublicUserApiTests.test_retrieve_user_unauthorized | (self) | Test that authentication is required for users | Test that authentication is required for users | def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED) | [
"def",
"test_retrieve_user_unauthorized",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"client",
".",
"get",
"(",
"ME_URL",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_401_UNAUTHORIZED",
")"
] | [
111,
4
] | [
115,
71
] | python | en | ['en', 'en', 'en'] | True |
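The `PrivateUserApiTests` rows below reference `self.user` and an authenticated `self.client`, but their `setUp` is not captured in this dump. A plausible sketch, reusing the module's `create_user` helper seen in the rows above (the imports are standard Django/DRF test utilities):

from django.test import TestCase
from rest_framework.test import APIClient


class PrivateUserApiTests(TestCase):
    """Test API requests that require authentication"""

    def setUp(self):
        self.user = create_user(
            email='[email protected]',
            password='testpass',
            name='Test name',
        )
        self.client = APIClient()
        # Bypass the token flow so every test starts already authenticated.
        self.client.force_authenticate(user=self.user)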
PrivateUserApiTests.test_retrieve_profile_success | (self) | Test retrieving profile for logged in user | Test retrieving profile for logged in user | def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
}) | [
"def",
"test_retrieve_profile_success",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"client",
".",
"get",
"(",
"ME_URL",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_200_OK",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"data",
",",
"{",
"'name'",
":",
"self",
".",
"user",
".",
"name",
",",
"'email'",
":",
"self",
".",
"user",
".",
"email",
"}",
")"
] | [
130,
4
] | [
138,
10
] | python | en | ['en', 'en', 'en'] | True |
PrivateUserApiTests.test_post_me_not_allowed | (self) | Test that POST is not allowed on the me url | Test that POST is not allowed on the me url | def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED) | [
"def",
"test_post_me_not_allowed",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"client",
".",
"post",
"(",
"ME_URL",
",",
"{",
"}",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_405_METHOD_NOT_ALLOWED",
")"
] | [
140,
4
] | [
144,
77
] | python | en | ['en', 'en', 'en'] | True |
PrivateUserApiTests.test_update_user_profile | (self) | Test updating the user profile for authenticated user | Test updating the user profile for authenticated user | def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {
'name': 'new name',
'password': 'newpass'
}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK) | [
"def",
"test_update_user_profile",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'name'",
":",
"'new name'",
",",
"'password'",
":",
"'newpass'",
"}",
"res",
"=",
"self",
".",
"client",
".",
"patch",
"(",
"ME_URL",
",",
"payload",
")",
"self",
".",
"user",
".",
"refresh_from_db",
"(",
")",
"self",
".",
"assertEqual",
"(",
"self",
".",
"user",
".",
"name",
",",
"payload",
"[",
"'name'",
"]",
")",
"self",
".",
"assertTrue",
"(",
"self",
".",
"user",
".",
"check_password",
"(",
"payload",
"[",
"'password'",
"]",
")",
")",
"self",
".",
"assertEqual",
"(",
"res",
".",
"status_code",
",",
"status",
".",
"HTTP_200_OK",
")"
] | [
146,
4
] | [
157,
61
] | python | en | ['en', 'en', 'en'] | True |
SSLStream.do_handshake | (self) | Ensure that the initial handshake has completed.
The SSL protocol requires an initial handshake to exchange
certificates, select cryptographic keys, and so forth, before any
actual data can be sent or received. You don't have to call this
method; if you don't, then :class:`SSLStream` will automatically
peform the handshake as needed, the first time you try to send or
receive data. But if you want to trigger it manually – for example,
because you want to look at the peer's certificate before you start
talking to them – then you can call this method.
If the initial handshake is already in progress in another task, this
waits for it to complete and then returns.
If the initial handshake has already completed, this returns
immediately without doing anything (except executing a checkpoint).
.. warning:: If this method is cancelled, then it may leave the
:class:`SSLStream` in an unusable state. If this happens then any
future attempt to use the object will raise
:exc:`trio.BrokenResourceError`.
| Ensure that the initial handshake has completed. | async def do_handshake(self):
"""Ensure that the initial handshake has completed.
The SSL protocol requires an initial handshake to exchange
certificates, select cryptographic keys, and so forth, before any
actual data can be sent or received. You don't have to call this
method; if you don't, then :class:`SSLStream` will automatically
perform the handshake as needed, the first time you try to send or
receive data. But if you want to trigger it manually – for example,
because you want to look at the peer's certificate before you start
talking to them – then you can call this method.
If the initial handshake is already in progress in another task, this
waits for it to complete and then returns.
If the initial handshake has already completed, this returns
immediately without doing anything (except executing a checkpoint).
.. warning:: If this method is cancelled, then it may leave the
:class:`SSLStream` in an unusable state. If this happens then any
future attempt to use the object will raise
:exc:`trio.BrokenResourceError`.
"""
self._check_status()
await self._handshook.ensure(checkpoint=True) | [
"async",
"def",
"do_handshake",
"(",
"self",
")",
":",
"self",
".",
"_check_status",
"(",
")",
"await",
"self",
".",
"_handshook",
".",
"ensure",
"(",
"checkpoint",
"=",
"True",
")"
] | [
608,
4
] | [
633,
53
] | python | en | ['en', 'en', 'en'] | True |
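A client-side sketch of forcing the handshake early, for example to inspect the peer certificate before sending data. Host and port are placeholders; `getpeercert()` is one of the `ssl.SSLObject` attributes the stream forwards once the handshake is done.

import ssl
import trio


async def main():
    tcp_stream = await trio.open_tcp_stream("example.com", 443)
    ssl_stream = trio.SSLStream(
        tcp_stream, ssl.create_default_context(), server_hostname="example.com"
    )
    await ssl_stream.do_handshake()   # handshake now, instead of lazily on first send/receive
    print(ssl_stream.getpeercert().get("subject"))
    await ssl_stream.aclose()


trio.run(main)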
SSLStream.receive_some | (self, max_bytes=None) | Read some data from the underlying transport, decrypt it, and
return it.
See :meth:`trio.abc.ReceiveStream.receive_some` for details.
.. warning:: If this method is cancelled while the initial handshake
or a renegotiation are in progress, then it may leave the
:class:`SSLStream` in an unusable state. If this happens then any
future attempt to use the object will raise
:exc:`trio.BrokenResourceError`.
| Read some data from the underlying transport, decrypt it, and
return it. | async def receive_some(self, max_bytes=None):
"""Read some data from the underlying transport, decrypt it, and
return it.
See :meth:`trio.abc.ReceiveStream.receive_some` for details.
.. warning:: If this method is cancelled while the initial handshake
or a renegotiation are in progress, then it may leave the
:class:`SSLStream` in an unusable state. If this happens then any
future attempt to use the object will raise
:exc:`trio.BrokenResourceError`.
"""
with self._outer_recv_conflict_detector:
self._check_status()
try:
await self._handshook.ensure(checkpoint=False)
except trio.BrokenResourceError as exc:
# For some reason, EOF before handshake sometimes raises
# SSLSyscallError instead of SSLEOFError (e.g. on my linux
# laptop, but not on appveyor). Thanks openssl.
if self._https_compatible and isinstance(
exc.__cause__,
(_stdlib_ssl.SSLEOFError, _stdlib_ssl.SSLSyscallError),
):
await trio.lowlevel.checkpoint()
return b""
else:
raise
if max_bytes is None:
# If we somehow have more data already in our pending buffer
# than the estimate receive size, bump up our size a bit for
# this read only.
max_bytes = max(self._estimated_receive_size, self._incoming.pending)
else:
max_bytes = _operator.index(max_bytes)
if max_bytes < 1:
raise ValueError("max_bytes must be >= 1")
try:
return await self._retry(self._ssl_object.read, max_bytes)
except trio.BrokenResourceError as exc:
# This isn't quite equivalent to just returning b"" in the
# first place, because we still end up with self._state set to
# BROKEN. But that's actually fine, because after getting an
# EOF on TLS then the only thing you can do is close the
# stream, and closing doesn't care about the state.
if self._https_compatible and isinstance(
exc.__cause__, _stdlib_ssl.SSLEOFError
):
await trio.lowlevel.checkpoint()
return b""
else:
raise | [
"async",
"def",
"receive_some",
"(",
"self",
",",
"max_bytes",
"=",
"None",
")",
":",
"with",
"self",
".",
"_outer_recv_conflict_detector",
":",
"self",
".",
"_check_status",
"(",
")",
"try",
":",
"await",
"self",
".",
"_handshook",
".",
"ensure",
"(",
"checkpoint",
"=",
"False",
")",
"except",
"trio",
".",
"BrokenResourceError",
"as",
"exc",
":",
"# For some reason, EOF before handshake sometimes raises",
"# SSLSyscallError instead of SSLEOFError (e.g. on my linux",
"# laptop, but not on appveyor). Thanks openssl.",
"if",
"self",
".",
"_https_compatible",
"and",
"isinstance",
"(",
"exc",
".",
"__cause__",
",",
"(",
"_stdlib_ssl",
".",
"SSLEOFError",
",",
"_stdlib_ssl",
".",
"SSLSyscallError",
")",
",",
")",
":",
"await",
"trio",
".",
"lowlevel",
".",
"checkpoint",
"(",
")",
"return",
"b\"\"",
"else",
":",
"raise",
"if",
"max_bytes",
"is",
"None",
":",
"# If we somehow have more data already in our pending buffer",
"# than the estimate receive size, bump up our size a bit for",
"# this read only.",
"max_bytes",
"=",
"max",
"(",
"self",
".",
"_estimated_receive_size",
",",
"self",
".",
"_incoming",
".",
"pending",
")",
"else",
":",
"max_bytes",
"=",
"_operator",
".",
"index",
"(",
"max_bytes",
")",
"if",
"max_bytes",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"max_bytes must be >= 1\"",
")",
"try",
":",
"return",
"await",
"self",
".",
"_retry",
"(",
"self",
".",
"_ssl_object",
".",
"read",
",",
"max_bytes",
")",
"except",
"trio",
".",
"BrokenResourceError",
"as",
"exc",
":",
"# This isn't quite equivalent to just returning b\"\" in the",
"# first place, because we still end up with self._state set to",
"# BROKEN. But that's actually fine, because after getting an",
"# EOF on TLS then the only thing you can do is close the",
"# stream, and closing doesn't care about the state.",
"if",
"self",
".",
"_https_compatible",
"and",
"isinstance",
"(",
"exc",
".",
"__cause__",
",",
"_stdlib_ssl",
".",
"SSLEOFError",
")",
":",
"await",
"trio",
".",
"lowlevel",
".",
"checkpoint",
"(",
")",
"return",
"b\"\"",
"else",
":",
"raise"
] | [
643,
4
] | [
695,
25
] | python | en | ['en', 'en', 'en'] | True |
SSLStream.send_all | (self, data) | Encrypt some data and then send it on the underlying transport.
See :meth:`trio.abc.SendStream.send_all` for details.
.. warning:: If this method is cancelled, then it may leave the
:class:`SSLStream` in an unusable state. If this happens then any
attempt to use the object will raise
:exc:`trio.BrokenResourceError`.
| Encrypt some data and then send it on the underlying transport. | async def send_all(self, data):
"""Encrypt some data and then send it on the underlying transport.
See :meth:`trio.abc.SendStream.send_all` for details.
.. warning:: If this method is cancelled, then it may leave the
:class:`SSLStream` in an unusable state. If this happens then any
attempt to use the object will raise
:exc:`trio.BrokenResourceError`.
"""
with self._outer_send_conflict_detector:
self._check_status()
await self._handshook.ensure(checkpoint=False)
# SSLObject interprets write(b"") as an EOF for some reason, which
# is not what we want.
if not data:
await trio.lowlevel.checkpoint()
return
await self._retry(self._ssl_object.write, data) | [
"async",
"def",
"send_all",
"(",
"self",
",",
"data",
")",
":",
"with",
"self",
".",
"_outer_send_conflict_detector",
":",
"self",
".",
"_check_status",
"(",
")",
"await",
"self",
".",
"_handshook",
".",
"ensure",
"(",
"checkpoint",
"=",
"False",
")",
"# SSLObject interprets write(b\"\") as an EOF for some reason, which",
"# is not what we want.",
"if",
"not",
"data",
":",
"await",
"trio",
".",
"lowlevel",
".",
"checkpoint",
"(",
")",
"return",
"await",
"self",
".",
"_retry",
"(",
"self",
".",
"_ssl_object",
".",
"write",
",",
"data",
")"
] | [
697,
4
] | [
716,
59
] | python | en | ['en', 'en', 'en'] | True |
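Putting `send_all()` and `receive_some()` together: a minimal request/response exchange. The request bytes are illustrative, and `https_compatible=True` is set because many web servers close the TCP connection without a proper close_notify, which would otherwise surface as `BrokenResourceError`.

import ssl
import trio


async def fetch_once(host: str) -> bytes:
    tcp_stream = await trio.open_tcp_stream(host, 443)
    ssl_stream = trio.SSLStream(
        tcp_stream,
        ssl.create_default_context(),
        server_hostname=host,
        https_compatible=True,  # tolerate servers that skip close_notify
    )
    await ssl_stream.send_all(
        b"GET / HTTP/1.1\r\nHost: " + host.encode() + b"\r\nConnection: close\r\n\r\n"
    )
    chunks = []
    while True:
        chunk = await ssl_stream.receive_some(4096)
        if not chunk:        # b"" signals end-of-stream
            break
        chunks.append(chunk)
    await ssl_stream.aclose()
    return b"".join(chunks)


print(trio.run(fetch_once, "example.com")[:80])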
SSLStream.unwrap | (self) | Cleanly close down the SSL/TLS encryption layer, allowing the
underlying stream to be used for unencrypted communication.
You almost certainly don't need this.
Returns:
A pair ``(transport_stream, trailing_bytes)``, where
``transport_stream`` is the underlying transport stream, and
``trailing_bytes`` is a byte string. Since :class:`SSLStream`
doesn't necessarily know where the end of the encrypted data will
be, it can happen that it accidentally reads too much from the
underlying stream. ``trailing_bytes`` contains this extra data; you
should process it as if it was returned from a call to
``transport_stream.receive_some(...)``.
| Cleanly close down the SSL/TLS encryption layer, allowing the
underlying stream to be used for unencrypted communication. | async def unwrap(self):
"""Cleanly close down the SSL/TLS encryption layer, allowing the
underlying stream to be used for unencrypted communication.
You almost certainly don't need this.
Returns:
A pair ``(transport_stream, trailing_bytes)``, where
``transport_stream`` is the underlying transport stream, and
``trailing_bytes`` is a byte string. Since :class:`SSLStream`
doesn't necessarily know where the end of the encrypted data will
be, it can happen that it accidentally reads too much from the
underlying stream. ``trailing_bytes`` contains this extra data; you
should process it as if it was returned from a call to
``transport_stream.receive_some(...)``.
"""
with self._outer_recv_conflict_detector, self._outer_send_conflict_detector:
self._check_status()
await self._handshook.ensure(checkpoint=False)
await self._retry(self._ssl_object.unwrap)
transport_stream = self.transport_stream
self.transport_stream = None
self._state = _State.CLOSED
return (transport_stream, self._incoming.read()) | [
"async",
"def",
"unwrap",
"(",
"self",
")",
":",
"with",
"self",
".",
"_outer_recv_conflict_detector",
",",
"self",
".",
"_outer_send_conflict_detector",
":",
"self",
".",
"_check_status",
"(",
")",
"await",
"self",
".",
"_handshook",
".",
"ensure",
"(",
"checkpoint",
"=",
"False",
")",
"await",
"self",
".",
"_retry",
"(",
"self",
".",
"_ssl_object",
".",
"unwrap",
")",
"transport_stream",
"=",
"self",
".",
"transport_stream",
"self",
".",
"transport_stream",
"=",
"None",
"self",
".",
"_state",
"=",
"_State",
".",
"CLOSED",
"return",
"(",
"transport_stream",
",",
"self",
".",
"_incoming",
".",
"read",
"(",
")",
")"
] | [
718,
4
] | [
742,
60
] | python | en | ['en', 'en', 'en'] | True |
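A sketch of the rarely needed `unwrap()`: finishing the TLS layer while keeping the underlying connection for plaintext use, as in a hypothetical protocol that downgrades after an encrypted negotiation phase.

import trio


async def downgrade_to_plaintext(ssl_stream: trio.SSLStream) -> trio.abc.Stream:
    # Shut down TLS cleanly; keep the transport stream open for plaintext traffic.
    transport_stream, trailing_bytes = await ssl_stream.unwrap()
    if trailing_bytes:
        # Bytes read past the end of the encrypted data; treat them as the first
        # plaintext data received on the underlying stream.
        print("carry-over plaintext:", trailing_bytes)
    return transport_stream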
SSLStream.aclose | (self) | Gracefully shut down this connection, and close the underlying
transport.
If ``https_compatible`` is False (the default), then this attempts to
first send a ``close_notify`` and then close the underlying stream by
calling its :meth:`~trio.abc.AsyncResource.aclose` method.
If ``https_compatible`` is set to True, then this simply closes the
underlying stream and marks this stream as closed.
| Gracefully shut down this connection, and close the underlying
transport. | async def aclose(self):
"""Gracefully shut down this connection, and close the underlying
transport.
If ``https_compatible`` is False (the default), then this attempts to
first send a ``close_notify`` and then close the underlying stream by
calling its :meth:`~trio.abc.AsyncResource.aclose` method.
If ``https_compatible`` is set to True, then this simply closes the
underlying stream and marks this stream as closed.
"""
if self._state is _State.CLOSED:
await trio.lowlevel.checkpoint()
return
if self._state is _State.BROKEN or self._https_compatible:
self._state = _State.CLOSED
await self.transport_stream.aclose()
return
try:
# https_compatible=False, so we're in spec-compliant mode and have
# to send close_notify so that the other side gets a cryptographic
# assurance that we've called aclose. Of course, we can't do
# anything cryptographic until after we've completed the
# handshake:
await self._handshook.ensure(checkpoint=False)
# Then, we call SSL_shutdown *once*, because we want to send a
# close_notify but *not* wait for the other side to send back a
# response. In principle it would be more polite to wait for the
# other side to reply with their own close_notify. However, if
# they aren't paying attention (e.g., if they're just sending
# data and not receiving) then we will never notice our
# close_notify and we'll be waiting forever. Eventually we'll time
# out (hopefully), but it's still kind of nasty. And we can't
# require the other side to always be receiving, because (a)
# backpressure is kind of important, and (b) I bet there are
# broken TLS implementations out there that don't receive all the
# time. (Like e.g. anyone using Python ssl in synchronous mode.)
#
# The send-then-immediately-close behavior is explicitly allowed
# by the TLS specs, so we're ok on that.
#
# Subtlety: SSLObject.unwrap will immediately call it a second
# time, and the second time will raise SSLWantReadError because
# there hasn't been time for the other side to respond
# yet. (Unless they spontaneously sent a close_notify before we
# called this, and it's either already been processed or gets
# pulled out of the buffer by Python's second call.) So the way to
# do what we want is to ignore SSLWantReadError on this call.
#
# Also, because the other side might have already sent
# close_notify and closed their connection then it's possible that
# our attempt to send close_notify will raise
# BrokenResourceError. This is totally legal, and in fact can happen
# with two well-behaved Trio programs talking to each other, so we
# don't want to raise an error. So we suppress BrokenResourceError
# here. (This is safe, because literally the only thing this call
# to _retry will do is send the close_notify alert, so that's
# surely where the error comes from.)
#
# FYI in some cases this could also raise SSLSyscallError which I
# think is because SSL_shutdown is terrible. (Check out that note
# at the bottom of the man page saying that it sometimes gets
# raised spuriously.) I haven't seen this since we switched to
# immediately closing the socket, and I don't know exactly what
# conditions cause it and how to respond, so for now we're just
# letting that happen. But if you start seeing it, then hopefully
# this will give you a little head start on tracking it down,
# because whoa did this puzzle us at the 2017 PyCon sprints.
#
# Also, if someone else is blocked in send/receive, then we aren't
# going to be able to do a clean shutdown. If that happens, we'll
# just do an unclean shutdown.
try:
await self._retry(self._ssl_object.unwrap, ignore_want_read=True)
except (trio.BrokenResourceError, trio.BusyResourceError):
pass
except:
# Failure! Kill the stream and move on.
await aclose_forcefully(self.transport_stream)
raise
else:
# Success! Gracefully close the underlying stream.
await self.transport_stream.aclose()
finally:
self._state = _State.CLOSED | [
"async",
"def",
"aclose",
"(",
"self",
")",
":",
"if",
"self",
".",
"_state",
"is",
"_State",
".",
"CLOSED",
":",
"await",
"trio",
".",
"lowlevel",
".",
"checkpoint",
"(",
")",
"return",
"if",
"self",
".",
"_state",
"is",
"_State",
".",
"BROKEN",
"or",
"self",
".",
"_https_compatible",
":",
"self",
".",
"_state",
"=",
"_State",
".",
"CLOSED",
"await",
"self",
".",
"transport_stream",
".",
"aclose",
"(",
")",
"return",
"try",
":",
"# https_compatible=False, so we're in spec-compliant mode and have",
"# to send close_notify so that the other side gets a cryptographic",
"# assurance that we've called aclose. Of course, we can't do",
"# anything cryptographic until after we've completed the",
"# handshake:",
"await",
"self",
".",
"_handshook",
".",
"ensure",
"(",
"checkpoint",
"=",
"False",
")",
"# Then, we call SSL_shutdown *once*, because we want to send a",
"# close_notify but *not* wait for the other side to send back a",
"# response. In principle it would be more polite to wait for the",
"# other side to reply with their own close_notify. However, if",
"# they aren't paying attention (e.g., if they're just sending",
"# data and not receiving) then we will never notice our",
"# close_notify and we'll be waiting forever. Eventually we'll time",
"# out (hopefully), but it's still kind of nasty. And we can't",
"# require the other side to always be receiving, because (a)",
"# backpressure is kind of important, and (b) I bet there are",
"# broken TLS implementations out there that don't receive all the",
"# time. (Like e.g. anyone using Python ssl in synchronous mode.)",
"#",
"# The send-then-immediately-close behavior is explicitly allowed",
"# by the TLS specs, so we're ok on that.",
"#",
"# Subtlety: SSLObject.unwrap will immediately call it a second",
"# time, and the second time will raise SSLWantReadError because",
"# there hasn't been time for the other side to respond",
"# yet. (Unless they spontaneously sent a close_notify before we",
"# called this, and it's either already been processed or gets",
"# pulled out of the buffer by Python's second call.) So the way to",
"# do what we want is to ignore SSLWantReadError on this call.",
"#",
"# Also, because the other side might have already sent",
"# close_notify and closed their connection then it's possible that",
"# our attempt to send close_notify will raise",
"# BrokenResourceError. This is totally legal, and in fact can happen",
"# with two well-behaved Trio programs talking to each other, so we",
"# don't want to raise an error. So we suppress BrokenResourceError",
"# here. (This is safe, because literally the only thing this call",
"# to _retry will do is send the close_notify alert, so that's",
"# surely where the error comes from.)",
"#",
"# FYI in some cases this could also raise SSLSyscallError which I",
"# think is because SSL_shutdown is terrible. (Check out that note",
"# at the bottom of the man page saying that it sometimes gets",
"# raised spuriously.) I haven't seen this since we switched to",
"# immediately closing the socket, and I don't know exactly what",
"# conditions cause it and how to respond, so for now we're just",
"# letting that happen. But if you start seeing it, then hopefully",
"# this will give you a little head start on tracking it down,",
"# because whoa did this puzzle us at the 2017 PyCon sprints.",
"#",
"# Also, if someone else is blocked in send/receive, then we aren't",
"# going to be able to do a clean shutdown. If that happens, we'll",
"# just do an unclean shutdown.",
"try",
":",
"await",
"self",
".",
"_retry",
"(",
"self",
".",
"_ssl_object",
".",
"unwrap",
",",
"ignore_want_read",
"=",
"True",
")",
"except",
"(",
"trio",
".",
"BrokenResourceError",
",",
"trio",
".",
"BusyResourceError",
")",
":",
"pass",
"except",
":",
"# Failure! Kill the stream and move on.",
"await",
"aclose_forcefully",
"(",
"self",
".",
"transport_stream",
")",
"raise",
"else",
":",
"# Success! Gracefully close the underlying stream.",
"await",
"self",
".",
"transport_stream",
".",
"aclose",
"(",
")",
"finally",
":",
"self",
".",
"_state",
"=",
"_State",
".",
"CLOSED"
] | [
744,
4
] | [
829,
39
] | python | en | ['en', 'en', 'en'] | True |
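
A minimal usage sketch for the aclose() record above, assuming trio's documented open_ssl_over_tcp_stream helper; the host and request bytes are placeholders:

import trio

async def fetch_and_close():
    # Returns an SSLStream wrapped around a TCP transport stream.
    stream = await trio.open_ssl_over_tcp_stream("example.com", 443, https_compatible=True)
    await stream.send_all(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    data = await stream.receive_some(4096)
    # aclose() performs the shutdown sequence documented above and then
    # closes the underlying transport stream.
    await stream.aclose()
    return data

trio.run(fetch_and_close)
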
SSLStream.wait_send_all_might_not_block | (self) | See :meth:`trio.abc.SendStream.wait_send_all_might_not_block`. | See :meth:`trio.abc.SendStream.wait_send_all_might_not_block`. | async def wait_send_all_might_not_block(self):
"""See :meth:`trio.abc.SendStream.wait_send_all_might_not_block`."""
# This method's implementation is deceptively simple.
#
# First, we take the outer send lock, because of Trio's standard
# semantics that wait_send_all_might_not_block and send_all
# conflict.
with self._outer_send_conflict_detector:
self._check_status()
# Then we take the inner send lock. We know that no other tasks
# are calling self.send_all or self.wait_send_all_might_not_block,
# because we have the outer_send_lock. But! There might be another
# task calling self.receive_some -> transport_stream.send_all, in
# which case if we were to call
# transport_stream.wait_send_all_might_not_block directly we'd
# have two tasks doing write-related operations on
# transport_stream simultaneously, which is not allowed. We
# *don't* want to raise this conflict to our caller, because it's
# purely an internal affair – all they did was call
# wait_send_all_might_not_block and receive_some at the same time,
# which is totally valid. And waiting for the lock is OK, because
# a call to send_all certainly wouldn't complete while the other
# task holds the lock.
async with self._inner_send_lock:
# Now we have the lock, which creates another potential
# problem: what if a call to self.receive_some attempts to do
# transport_stream.send_all now? It'll have to wait for us to
# finish! But that's OK, because we release the lock as soon
# as the underlying stream becomes writable, and the
# self.receive_some call wasn't going to make any progress
# until then anyway.
#
# Of course, this does mean we might return *before* the
# stream is logically writable, because immediately after we
# return self.receive_some might write some data and make it
# non-writable again. But that's OK too,
# wait_send_all_might_not_block only guarantees that it
# doesn't return late.
await self.transport_stream.wait_send_all_might_not_block() | [
"async",
"def",
"wait_send_all_might_not_block",
"(",
"self",
")",
":",
"# This method's implementation is deceptively simple.",
"#",
"# First, we take the outer send lock, because of Trio's standard",
"# semantics that wait_send_all_might_not_block and send_all",
"# conflict.",
"with",
"self",
".",
"_outer_send_conflict_detector",
":",
"self",
".",
"_check_status",
"(",
")",
"# Then we take the inner send lock. We know that no other tasks",
"# are calling self.send_all or self.wait_send_all_might_not_block,",
"# because we have the outer_send_lock. But! There might be another",
"# task calling self.receive_some -> transport_stream.send_all, in",
"# which case if we were to call",
"# transport_stream.wait_send_all_might_not_block directly we'd",
"# have two tasks doing write-related operations on",
"# transport_stream simultaneously, which is not allowed. We",
"# *don't* want to raise this conflict to our caller, because it's",
"# purely an internal affair – all they did was call",
"# wait_send_all_might_not_block and receive_some at the same time,",
"# which is totally valid. And waiting for the lock is OK, because",
"# a call to send_all certainly wouldn't complete while the other",
"# task holds the lock.",
"async",
"with",
"self",
".",
"_inner_send_lock",
":",
"# Now we have the lock, which creates another potential",
"# problem: what if a call to self.receive_some attempts to do",
"# transport_stream.send_all now? It'll have to wait for us to",
"# finish! But that's OK, because we release the lock as soon",
"# as the underlying stream becomes writable, and the",
"# self.receive_some call wasn't going to make any progress",
"# until then anyway.",
"#",
"# Of course, this does mean we might return *before* the",
"# stream is logically writable, because immediately after we",
"# return self.receive_some might write some data and make it",
"# non-writable again. But that's OK too,",
"# wait_send_all_might_not_block only guarantees that it",
"# doesn't return late.",
"await",
"self",
".",
"transport_stream",
".",
"wait_send_all_might_not_block",
"(",
")"
] | [
831,
4
] | [
869,
75
] | python | en | ['en', 'en', 'en'] | False |
SSLListener.accept | (self) | Accept the next connection and wrap it in an :class:`SSLStream`.
See :meth:`trio.abc.Listener.accept` for details.
| Accept the next connection and wrap it in an :class:`SSLStream`. | async def accept(self):
"""Accept the next connection and wrap it in an :class:`SSLStream`.
See :meth:`trio.abc.Listener.accept` for details.
"""
transport_stream = await self.transport_listener.accept()
return SSLStream(
transport_stream,
self._ssl_context,
server_side=True,
https_compatible=self._https_compatible,
) | [
"async",
"def",
"accept",
"(",
"self",
")",
":",
"transport_stream",
"=",
"await",
"self",
".",
"transport_listener",
".",
"accept",
"(",
")",
"return",
"SSLStream",
"(",
"transport_stream",
",",
"self",
".",
"_ssl_context",
",",
"server_side",
"=",
"True",
",",
"https_compatible",
"=",
"self",
".",
"_https_compatible",
",",
")"
] | [
908,
4
] | [
920,
9
] | python | en | ['en', 'en', 'en'] | True |
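
A hedged server-side sketch of the accept() record above: plain TCP listeners are wrapped in SSLListener so each accepted connection arrives as an SSLStream. The certificate and key paths are placeholders and the echo handler is only illustrative:

import ssl
import trio

async def echo_handler(stream):
    data = await stream.receive_some(4096)
    await stream.send_all(data)
    await stream.aclose()

async def serve_tls():
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ssl_context.load_cert_chain("server_cert.pem", "server_key.pem")  # placeholder paths
    tcp_listeners = await trio.open_tcp_listeners(8443)
    ssl_listeners = [trio.SSLListener(listener, ssl_context) for listener in tcp_listeners]
    # Each connection handed to echo_handler is the SSLStream built in accept().
    await trio.serve_listeners(echo_handler, ssl_listeners)

trio.run(serve_tls)
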
SSLListener.aclose | (self) | Close the transport listener. | Close the transport listener. | async def aclose(self):
"""Close the transport listener."""
await self.transport_listener.aclose() | [
"async",
"def",
"aclose",
"(",
"self",
")",
":",
"await",
"self",
".",
"transport_listener",
".",
"aclose",
"(",
")"
] | [
922,
4
] | [
924,
46
] | python | en | ['en', 'sd', 'en'] | True |
PDFToTextConverter.__init__ | (self, remove_numeric_tables: bool = False, valid_languages: Optional[List[str]] = None) |
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could be possible candidates for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add a test for encoding errors. If the extracted text is
not one of the valid languages, it is likely an encoding error resulting
in garbled text.
|
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could be possible candidates for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add a test for encoding errors. If the extracted text is
not one of the valid languages, it is likely an encoding error resulting
in garbled text.
| def __init__(self, remove_numeric_tables: bool = False, valid_languages: Optional[List[str]] = None):
"""
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could be possible candidates for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add a test for encoding errors. If the extracted text is
not one of the valid languages, it is likely an encoding error resulting
in garbled text.
"""
verify_installation = subprocess.run(["pdftotext -v"], shell=True)
if verify_installation.returncode == 127:
raise Exception(
"""pdftotext is not installed. It is part of xpdf or poppler-utils software suite.
Installation on Linux:
wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.03.tar.gz &&
tar -xvf xpdf-tools-linux-4.03.tar.gz && sudo cp xpdf-tools-linux-4.03/bin64/pdftotext /usr/local/bin
Installation on MacOS:
brew install xpdf
You can find more details here: https://www.xpdfreader.com
"""
)
super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages) | [
"def",
"__init__",
"(",
"self",
",",
"remove_numeric_tables",
":",
"bool",
"=",
"False",
",",
"valid_languages",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"verify_installation",
"=",
"subprocess",
".",
"run",
"(",
"[",
"\"pdftotext -v\"",
"]",
",",
"shell",
"=",
"True",
")",
"if",
"verify_installation",
".",
"returncode",
"==",
"127",
":",
"raise",
"Exception",
"(",
"\"\"\"pdftotext is not installed. It is part of xpdf or poppler-utils software suite.\n \n Installation on Linux:\n wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.03.tar.gz &&\n tar -xvf xpdf-tools-linux-4.03.tar.gz && sudo cp xpdf-tools-linux-4.03/bin64/pdftotext /usr/local/bin\n \n Installation on MacOS:\n brew install xpdf\n \n You can find more details here: https://www.xpdfreader.com\n \"\"\"",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"remove_numeric_tables",
"=",
"remove_numeric_tables",
",",
"valid_languages",
"=",
"valid_languages",
")"
] | [
11,
4
] | [
40,
102
] | python | en | ['en', 'error', 'th'] | False |
PDFToTextConverter.convert | (
self,
file_path: Path,
meta: Optional[Dict[str, str]] = None,
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
encoding: str = "Latin1",
) |
Extract text from a .pdf file using the pdftotext library (https://www.xpdfreader.com/pdftotext-man.html)
:param file_path: Path to the .pdf file you want to convert
:param meta: Optional dictionary with metadata that shall be attached to all resulting documents.
Can be any custom keys and values.
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could be possible candidates for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add a test for encoding errors. If the extracted text is
not one of the valid languages, it is likely an encoding error resulting
in garbled text.
:param encoding: Encoding that will be passed as -enc parameter to pdftotext. "Latin 1" is the default encoding
of pdftotext. While this works well on many PDFs, you might need to switch to "UTF-8" or
others if your doc contains special characters (e.g. German Umlauts, Cyrillic characters ...).
Note: With "UTF-8" we experienced cases, where a simple "fi" gets wrongly parsed as
"xef\xac\x81c" (see test cases). That's why we keep "Latin 1" as default here.
(See list of available encodings by running `pdftotext -listencodings` in the terminal)
|
Extract text from a .pdf file using the pdftotext library (https://www.xpdfreader.com/pdftotext-man.html) | def convert(
self,
file_path: Path,
meta: Optional[Dict[str, str]] = None,
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
encoding: str = "Latin1",
) -> Dict[str, Any]:
"""
Extract text from a .pdf file using the pdftotext library (https://www.xpdfreader.com/pdftotext-man.html)
:param file_path: Path to the .pdf file you want to convert
:param meta: Optional dictionary with metadata that shall be attached to all resulting documents.
Can be any custom keys and values.
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
The tabular structures in documents might be noise for the reader model if it
does not have table parsing capability for finding answers. However, tables
may also have long strings that could be possible candidates for searching answers.
The rows containing strings are thus retained in this option.
:param valid_languages: validate languages from a list of languages specified in the ISO 639-1
(https://en.wikipedia.org/wiki/ISO_639-1) format.
This option can be used to add a test for encoding errors. If the extracted text is
not one of the valid languages, it is likely an encoding error resulting
in garbled text.
:param encoding: Encoding that will be passed as -enc parameter to pdftotext. "Latin 1" is the default encoding
of pdftotext. While this works well on many PDFs, you might need to switch to "UTF-8" or
others if your doc contains special characters (e.g. German Umlauts, Cyrillic characters ...).
Note: With "UTF-8" we experienced cases, where a simple "fi" gets wrongly parsed as
"xef\xac\x81c" (see test cases). That's why we keep "Latin 1" as default here.
(See list of available encodings by running `pdftotext -listencodings` in the terminal)
"""
pages = self._read_pdf(file_path, layout=False, encoding=encoding)
if remove_numeric_tables is None:
remove_numeric_tables = self.remove_numeric_tables
if valid_languages is None:
valid_languages = self.valid_languages
cleaned_pages = []
for page in pages:
# pdftotext tool provides an option to retain the original physical layout of a PDF page. This behaviour
# can be toggled by using the layout param.
# layout=True
# + table structures get retained better
# - multi-column pages(eg, research papers) gets extracted with text from multiple columns on same line
# layout=False
# + keeps strings in content stream order, hence multi column layout works well
# - cells of tables gets split across line
#
# Here, as a "safe" default, layout is turned off.
lines = page.splitlines()
cleaned_lines = []
for line in lines:
words = line.split()
digits = [word for word in words if any(i.isdigit() for i in word)]
# remove lines having > 40% of words as digits AND not ending with a period(.)
if remove_numeric_tables:
if words and len(digits) / len(words) > 0.4 and not line.strip().endswith("."):
logger.debug(f"Removing line '{line}' from {file_path}")
continue
cleaned_lines.append(line)
page = "\n".join(cleaned_lines)
cleaned_pages.append(page)
if valid_languages:
document_text = "".join(cleaned_pages)
if not self.validate_language(document_text):
logger.warning(
f"The language for {file_path} is not one of {self.valid_languages}. The file may not have "
f"been decoded in the correct text format."
)
text = "\f".join(cleaned_pages)
document = {"text": text, "meta": meta}
return document | [
"def",
"convert",
"(",
"self",
",",
"file_path",
":",
"Path",
",",
"meta",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"None",
",",
"remove_numeric_tables",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"valid_languages",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"encoding",
":",
"str",
"=",
"\"Latin1\"",
",",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"pages",
"=",
"self",
".",
"_read_pdf",
"(",
"file_path",
",",
"layout",
"=",
"False",
",",
"encoding",
"=",
"encoding",
")",
"if",
"remove_numeric_tables",
"is",
"None",
":",
"remove_numeric_tables",
"=",
"self",
".",
"remove_numeric_tables",
"if",
"valid_languages",
"is",
"None",
":",
"valid_languages",
"=",
"self",
".",
"valid_languages",
"cleaned_pages",
"=",
"[",
"]",
"for",
"page",
"in",
"pages",
":",
"# pdftotext tool provides an option to retain the original physical layout of a PDF page. This behaviour",
"# can be toggled by using the layout param.",
"# layout=True",
"# + table structures get retained better",
"# - multi-column pages(eg, research papers) gets extracted with text from multiple columns on same line",
"# layout=False",
"# + keeps strings in content stream order, hence multi column layout works well",
"# - cells of tables gets split across line",
"#",
"# Here, as a \"safe\" default, layout is turned off.",
"lines",
"=",
"page",
".",
"splitlines",
"(",
")",
"cleaned_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"words",
"=",
"line",
".",
"split",
"(",
")",
"digits",
"=",
"[",
"word",
"for",
"word",
"in",
"words",
"if",
"any",
"(",
"i",
".",
"isdigit",
"(",
")",
"for",
"i",
"in",
"word",
")",
"]",
"# remove lines having > 40% of words as digits AND not ending with a period(.)",
"if",
"remove_numeric_tables",
":",
"if",
"words",
"and",
"len",
"(",
"digits",
")",
"/",
"len",
"(",
"words",
")",
">",
"0.4",
"and",
"not",
"line",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"\".\"",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Removing line '{line}' from {file_path}\"",
")",
"continue",
"cleaned_lines",
".",
"append",
"(",
"line",
")",
"page",
"=",
"\"\\n\"",
".",
"join",
"(",
"cleaned_lines",
")",
"cleaned_pages",
".",
"append",
"(",
"page",
")",
"if",
"valid_languages",
":",
"document_text",
"=",
"\"\"",
".",
"join",
"(",
"cleaned_pages",
")",
"if",
"not",
"self",
".",
"validate_language",
"(",
"document_text",
")",
":",
"logger",
".",
"warning",
"(",
"f\"The language for {file_path} is not one of {self.valid_languages}. The file may not have \"",
"f\"been decoded in the correct text format.\"",
")",
"text",
"=",
"\"\\f\"",
".",
"join",
"(",
"cleaned_pages",
")",
"document",
"=",
"{",
"\"text\"",
":",
"text",
",",
"\"meta\"",
":",
"meta",
"}",
"return",
"document"
] | [
42,
4
] | [
118,
23
] | python | en | ['en', 'error', 'th'] | False |
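
A usage sketch for the convert() record above, assuming the PDFToTextConverter class is importable from the surrounding package and that sample.pdf is a placeholder file on disk:

from pathlib import Path

converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
document = converter.convert(file_path=Path("sample.pdf"), meta={"source": "sample.pdf"})
print(document["text"][:200])   # extracted text; pages are separated by form feeds ("\f")
print(document["meta"])         # the metadata dict is passed through unchanged
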
PDFToTextConverter._read_pdf | (self, file_path: Path, layout: bool, encoding: str) |
Extract pages from the pdf file at file_path.
:param file_path: path of the pdf file
:param layout: whether to retain the original physical layout for a page. If disabled, PDF pages are read in
the content stream order.
|
Extract pages from the pdf file at file_path. | def _read_pdf(self, file_path: Path, layout: bool, encoding: str) -> List[str]:
"""
Extract pages from the pdf file at file_path.
:param file_path: path of the pdf file
:param layout: whether to retain the original physical layout for a page. If disabled, PDF pages are read in
the content stream order.
"""
if layout:
command = ["pdftotext", "-enc", encoding, "-layout", str(file_path), "-"]
else:
command = ["pdftotext", "-enc", encoding, str(file_path), "-"]
output = subprocess.run(command, stdout=subprocess.PIPE, shell=False)
document = output.stdout.decode(errors="ignore")
pages = document.split("\f")
pages = pages[:-1] # the last page in the split is always empty.
return pages | [
"def",
"_read_pdf",
"(",
"self",
",",
"file_path",
":",
"Path",
",",
"layout",
":",
"bool",
",",
"encoding",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"layout",
":",
"command",
"=",
"[",
"\"pdftotext\"",
",",
"\"-enc\"",
",",
"encoding",
",",
"\"-layout\"",
",",
"str",
"(",
"file_path",
")",
",",
"\"-\"",
"]",
"else",
":",
"command",
"=",
"[",
"\"pdftotext\"",
",",
"\"-enc\"",
",",
"encoding",
",",
"str",
"(",
"file_path",
")",
",",
"\"-\"",
"]",
"output",
"=",
"subprocess",
".",
"run",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"False",
")",
"document",
"=",
"output",
".",
"stdout",
".",
"decode",
"(",
"errors",
"=",
"\"ignore\"",
")",
"pages",
"=",
"document",
".",
"split",
"(",
"\"\\f\"",
")",
"pages",
"=",
"pages",
"[",
":",
"-",
"1",
"]",
"# the last page in the split is always empty.",
"return",
"pages"
] | [
120,
4
] | [
136,
20
] | python | en | ['en', 'error', 'th'] | False |
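
For reference, a rough stand-alone equivalent of the subprocess call in _read_pdf with layout retained might look like the following; it assumes the xpdf/poppler pdftotext binary is on PATH and sample.pdf is a placeholder:

import subprocess

output = subprocess.run(
    ["pdftotext", "-enc", "Latin1", "-layout", "sample.pdf", "-"],
    stdout=subprocess.PIPE,
)
pages = output.stdout.decode(errors="ignore").split("\f")[:-1]  # the last split element is always empty
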
foo | () | ur"""unicode-raw | ur"""unicode-raw | def foo():
ur"""unicode-raw""" | [
"def",
"foo",
"(",
")",
":"
] | [
0,
0
] | [
1,
23
] | python | it | ['pl', 'sn', 'it'] | False |
bar | () | u"""unicode | u"""unicode | def bar():
u"""unicode""" | [
"def",
"bar",
"(",
")",
":"
] | [
3,
0
] | [
4,
18
] | python | en | ['en', 'hr', 'it'] | False |
_clean_check | (cmd, target) |
Run the command to download target. If the command fails, clean up before
re-raising the error.
|
Run the command to download target. If the command fails, clean up before
re-raising the error.
| def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise | [
"def",
"_clean_check",
"(",
"cmd",
",",
"target",
")",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"cmd",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"if",
"os",
".",
"access",
"(",
"target",
",",
"os",
".",
"F_OK",
")",
":",
"os",
".",
"unlink",
"(",
"target",
")",
"raise"
] | [
153,
0
] | [
163,
13
] | python | en | ['en', 'error', 'th'] | False |
download_file_powershell | (url, target) |
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
|
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
| def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
cmd = [
'powershell',
'-Command',
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
]
_clean_check(cmd, target) | [
"def",
"download_file_powershell",
"(",
"url",
",",
"target",
")",
":",
"target",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"target",
")",
"cmd",
"=",
"[",
"'powershell'",
",",
"'-Command'",
",",
"\"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)\"",
"%",
"vars",
"(",
")",
",",
"]",
"_clean_check",
"(",
"cmd",
",",
"target",
")"
] | [
165,
0
] | [
176,
29
] | python | en | ['en', 'error', 'th'] | False |
download_file_insecure | (url, target) |
Use Python to download the file, even though it cannot authenticate the
connection.
|
Use Python to download the file, even though it cannot authenticate the
connection.
| def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
src = dst = None
try:
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(target, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close() | [
"def",
"download_file_insecure",
"(",
"url",
",",
"target",
")",
":",
"try",
":",
"from",
"urllib",
".",
"request",
"import",
"urlopen",
"except",
"ImportError",
":",
"from",
"urllib2",
"import",
"urlopen",
"src",
"=",
"dst",
"=",
"None",
"try",
":",
"src",
"=",
"urlopen",
"(",
"url",
")",
"# Read/write all in one block, so we don't create a corrupt file",
"# if the download is interrupted.",
"data",
"=",
"src",
".",
"read",
"(",
")",
"dst",
"=",
"open",
"(",
"target",
",",
"\"wb\"",
")",
"dst",
".",
"write",
"(",
"data",
")",
"finally",
":",
"if",
"src",
":",
"src",
".",
"close",
"(",
")",
"if",
"dst",
":",
"dst",
".",
"close",
"(",
")"
] | [
230,
0
] | [
251,
23
] | python | en | ['en', 'error', 'th'] | False |
download_setuptools | (version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15,
downloader_factory=get_best_downloader) | Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
| Download setuptools from a specified location and return its filename | def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15,
downloader_factory=get_best_downloader):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
tgz_name = "setuptools-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto) | [
"def",
"download_setuptools",
"(",
"version",
"=",
"DEFAULT_VERSION",
",",
"download_base",
"=",
"DEFAULT_URL",
",",
"to_dir",
"=",
"os",
".",
"curdir",
",",
"delay",
"=",
"15",
",",
"downloader_factory",
"=",
"get_best_downloader",
")",
":",
"# making sure we use the absolute path",
"to_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"to_dir",
")",
"tgz_name",
"=",
"\"setuptools-%s.tar.gz\"",
"%",
"version",
"url",
"=",
"download_base",
"+",
"tgz_name",
"saveto",
"=",
"os",
".",
"path",
".",
"join",
"(",
"to_dir",
",",
"tgz_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"saveto",
")",
":",
"# Avoid repeated downloads",
"log",
".",
"warn",
"(",
"\"Downloading %s\"",
",",
"url",
")",
"downloader",
"=",
"downloader_factory",
"(",
")",
"downloader",
"(",
"url",
",",
"saveto",
")",
"return",
"os",
".",
"path",
".",
"realpath",
"(",
"saveto",
")"
] | [
267,
0
] | [
290,
35
] | python | en | ['en', 'en', 'en'] | True |
_extractall | (self, path=".", members=None) | Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
| Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
| def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e) | [
"def",
"_extractall",
"(",
"self",
",",
"path",
"=",
"\".\"",
",",
"members",
"=",
"None",
")",
":",
"import",
"copy",
"import",
"operator",
"from",
"tarfile",
"import",
"ExtractError",
"directories",
"=",
"[",
"]",
"if",
"members",
"is",
"None",
":",
"members",
"=",
"self",
"for",
"tarinfo",
"in",
"members",
":",
"if",
"tarinfo",
".",
"isdir",
"(",
")",
":",
"# Extract directories with a safe mode.",
"directories",
".",
"append",
"(",
"tarinfo",
")",
"tarinfo",
"=",
"copy",
".",
"copy",
"(",
"tarinfo",
")",
"tarinfo",
".",
"mode",
"=",
"448",
"# decimal for oct 0700",
"self",
".",
"extract",
"(",
"tarinfo",
",",
"path",
")",
"# Reverse sort directories.",
"if",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"4",
")",
":",
"def",
"sorter",
"(",
"dir1",
",",
"dir2",
")",
":",
"return",
"cmp",
"(",
"dir1",
".",
"name",
",",
"dir2",
".",
"name",
")",
"directories",
".",
"sort",
"(",
"sorter",
")",
"directories",
".",
"reverse",
"(",
")",
"else",
":",
"directories",
".",
"sort",
"(",
"key",
"=",
"operator",
".",
"attrgetter",
"(",
"'name'",
")",
",",
"reverse",
"=",
"True",
")",
"# Set correct owner, mtime and filemode on directories.",
"for",
"tarinfo",
"in",
"directories",
":",
"dirpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"tarinfo",
".",
"name",
")",
"try",
":",
"self",
".",
"chown",
"(",
"tarinfo",
",",
"dirpath",
")",
"self",
".",
"utime",
"(",
"tarinfo",
",",
"dirpath",
")",
"self",
".",
"chmod",
"(",
"tarinfo",
",",
"dirpath",
")",
"except",
"ExtractError",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"if",
"self",
".",
"errorlevel",
">",
"1",
":",
"raise",
"else",
":",
"self",
".",
"_dbg",
"(",
"1",
",",
"\"tarfile: %s\"",
"%",
"e",
")"
] | [
293,
0
] | [
337,
47
] | python | en | ['en', 'en', 'en'] | True |
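
A hedged sketch of calling the _extractall helper above on a TarFile object, as a bootstrap script might; the archive name is a placeholder:

import tarfile

archive = tarfile.open("setuptools-1.4.2.tar.gz")  # placeholder archive name
try:
    # The helper takes the TarFile as its first argument in place of `self`.
    _extractall(archive, path="build")
finally:
    archive.close()
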
_build_install_args | (options) |
Build the arguments to 'python setup.py install' on the setuptools package
|
Build the arguments to 'python setup.py install' on the setuptools package
| def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args | [
"def",
"_build_install_args",
"(",
"options",
")",
":",
"install_args",
"=",
"[",
"]",
"if",
"options",
".",
"user_install",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"6",
")",
":",
"log",
".",
"warn",
"(",
"\"--user requires Python 2.6 or later\"",
")",
"raise",
"SystemExit",
"(",
"1",
")",
"install_args",
".",
"append",
"(",
"'--user'",
")",
"return",
"install_args"
] | [
340,
0
] | [
350,
23
] | python | en | ['en', 'error', 'th'] | False |
_parse_args | () |
Parse the command line for options
|
Parse the command line for options
| def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
options, args = parser.parse_args()
# positional arguments are ignored
return options | [
"def",
"_parse_args",
"(",
")",
":",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
")",
"parser",
".",
"add_option",
"(",
"'--user'",
",",
"dest",
"=",
"'user_install'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'install in user site package (requires Python 2.6 or later)'",
")",
"parser",
".",
"add_option",
"(",
"'--download-base'",
",",
"dest",
"=",
"'download_base'",
",",
"metavar",
"=",
"\"URL\"",
",",
"default",
"=",
"DEFAULT_URL",
",",
"help",
"=",
"'alternative URL from where to download the setuptools package'",
")",
"parser",
".",
"add_option",
"(",
"'--insecure'",
",",
"dest",
"=",
"'downloader_factory'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"lambda",
":",
"download_file_insecure",
",",
"default",
"=",
"get_best_downloader",
",",
"help",
"=",
"'Use internal, non-validating downloader'",
")",
"options",
",",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# positional arguments are ignored",
"return",
"options"
] | [
352,
0
] | [
371,
18
] | python | en | ['en', 'error', 'th'] | False |
main | (version=DEFAULT_VERSION) | Install or upgrade setuptools and EasyInstall | Install or upgrade setuptools and EasyInstall | def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base,
downloader_factory=options.downloader_factory)
return _install(tarball, _build_install_args(options)) | [
"def",
"main",
"(",
"version",
"=",
"DEFAULT_VERSION",
")",
":",
"options",
"=",
"_parse_args",
"(",
")",
"tarball",
"=",
"download_setuptools",
"(",
"download_base",
"=",
"options",
".",
"download_base",
",",
"downloader_factory",
"=",
"options",
".",
"downloader_factory",
")",
"return",
"_install",
"(",
"tarball",
",",
"_build_install_args",
"(",
"options",
")",
")"
] | [
373,
0
] | [
378,
58
] | python | en | ['en', 'et', 'en'] | True |
ExpectTableRowCountToEqual.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
value = configuration.kwargs.get("value")
try:
assert value is not None, "An expected row count must be provided"
if not isinstance(value, (int, dict)):
raise ValueError("Provided row count must be an integer")
if isinstance(value, dict):
assert (
"$PARAMETER" in value
), 'Evaluation Parameter dict for value kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# Setting up a configuration",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"value",
"=",
"configuration",
".",
"kwargs",
".",
"get",
"(",
"\"value\"",
")",
"try",
":",
"assert",
"value",
"is",
"not",
"None",
",",
"\"An expected row count must be provided\"",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"dict",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Provided row count must be an integer\"",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"assert",
"(",
"\"$PARAMETER\"",
"in",
"value",
")",
",",
"'Evaluation Parameter dict for value kwarg must have \"$PARAMETER\" key.'",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"InvalidExpectationConfigurationError",
"(",
"str",
"(",
"e",
")",
")",
"return",
"True"
] | [
77,
4
] | [
107,
19
] | python | en | ['en', 'error', 'th'] | False |
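
A hedged illustration of the two accepted forms of the "value" kwarg checked above, assuming great_expectations' ExpectationConfiguration is importable from great_expectations.core (the exact import path may vary by version); the evaluation-parameter name is hypothetical:

from great_expectations.core import ExpectationConfiguration

literal_config = ExpectationConfiguration(
    expectation_type="expect_table_row_count_to_equal",
    kwargs={"value": 10000},
)
parameter_config = ExpectationConfiguration(
    expectation_type="expect_table_row_count_to_equal",
    kwargs={"value": {"$PARAMETER": "expected_row_count"}},  # hypothetical parameter name
)
# Per the checks above, a missing value raises InvalidExpectationConfigurationError,
# while a non-integer literal such as 3.5 raises ValueError.
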
MultiError.filter | (cls, handler, root_exc) | Apply the given ``handler`` to all the exceptions in ``root_exc``.
Args:
handler: A callable that takes an atomic (non-MultiError) exception
as input, and returns either a new exception object or None.
root_exc: An exception, often (though not necessarily) a
:exc:`MultiError`.
Returns:
A new exception object in which each component exception ``exc`` has
been replaced by the result of running ``handler(exc)`` – or, if
``handler`` returned None for all the inputs, returns None.
| Apply the given ``handler`` to all the exceptions in ``root_exc``. | def filter(cls, handler, root_exc):
"""Apply the given ``handler`` to all the exceptions in ``root_exc``.
Args:
handler: A callable that takes an atomic (non-MultiError) exception
as input, and returns either a new exception object or None.
root_exc: An exception, often (though not necessarily) a
:exc:`MultiError`.
Returns:
A new exception object in which each component exception ``exc`` has
been replaced by the result of running ``handler(exc)`` – or, if
``handler`` returned None for all the inputs, returns None.
"""
return _filter_impl(handler, root_exc) | [
"def",
"filter",
"(",
"cls",
",",
"handler",
",",
"root_exc",
")",
":",
"return",
"_filter_impl",
"(",
"handler",
",",
"root_exc",
")"
] | [
211,
4
] | [
227,
46
] | python | en | ['en', 'en', 'en'] | True |
MultiError.catch | (cls, handler) | Return a context manager that catches and re-throws exceptions
after running :meth:`filter` on them.
Args:
handler: as for :meth:`filter`
| Return a context manager that catches and re-throws exceptions
after running :meth:`filter` on them. | def catch(cls, handler):
"""Return a context manager that catches and re-throws exceptions
after running :meth:`filter` on them.
Args:
handler: as for :meth:`filter`
"""
return MultiErrorCatcher(handler) | [
"def",
"catch",
"(",
"cls",
",",
"handler",
")",
":",
"return",
"MultiErrorCatcher",
"(",
"handler",
")"
] | [
230,
4
] | [
239,
41
] | python | en | ['en', 'en', 'en'] | True |
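
A minimal sketch of the filter/catch pattern documented in the two records above: the handler receives each atomic exception and may swallow it by returning None or replace it by returning a different exception:

import trio

def ignore_broken_resources(exc):
    if isinstance(exc, trio.BrokenResourceError):
        return None   # drop this exception from the MultiError
    return exc        # keep everything else unchanged

async def run_children(child):
    with trio.MultiError.catch(ignore_broken_resources):
        async with trio.open_nursery() as nursery:
            nursery.start_soon(child)
            nursery.start_soon(child)
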
get_parameter_value_and_validate_return_type | (
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
expected_return_type: Optional[Union[type, tuple]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) |
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
|
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
| def get_parameter_value_and_validate_return_type(
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
expected_return_type: Optional[Union[type, tuple]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
"""
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
"""
if isinstance(parameter_reference, dict):
parameter_reference = dict(copy.deepcopy(parameter_reference))
parameter_reference = get_parameter_value(
domain=domain,
parameter_reference=parameter_reference,
variables=variables,
parameters=parameters,
)
if expected_return_type is not None:
if not isinstance(parameter_reference, expected_return_type):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \
(value of type "{str(type(parameter_reference))}" was encountered).
"""
)
return parameter_reference | [
"def",
"get_parameter_value_and_validate_return_type",
"(",
"domain",
":",
"Optional",
"[",
"Domain",
"]",
"=",
"None",
",",
"parameter_reference",
":",
"Optional",
"[",
"Union",
"[",
"Any",
",",
"str",
"]",
"]",
"=",
"None",
",",
"expected_return_type",
":",
"Optional",
"[",
"Union",
"[",
"type",
",",
"tuple",
"]",
"]",
"=",
"None",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
"parameters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"ParameterContainer",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Optional",
"[",
"Any",
"]",
":",
"if",
"isinstance",
"(",
"parameter_reference",
",",
"dict",
")",
":",
"parameter_reference",
"=",
"dict",
"(",
"copy",
".",
"deepcopy",
"(",
"parameter_reference",
")",
")",
"parameter_reference",
"=",
"get_parameter_value",
"(",
"domain",
"=",
"domain",
",",
"parameter_reference",
"=",
"parameter_reference",
",",
"variables",
"=",
"variables",
",",
"parameters",
"=",
"parameters",
",",
")",
"if",
"expected_return_type",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"parameter_reference",
",",
"expected_return_type",
")",
":",
"raise",
"ge_exceptions",
".",
"ProfilerExecutionError",
"(",
"message",
"=",
"f\"\"\"Argument \"{parameter_reference}\" must be of type \"{str(expected_return_type)}\" \\\n(value of type \"{str(type(parameter_reference))}\" was encountered).\n\"\"\"",
")",
"return",
"parameter_reference"
] | [
130,
0
] | [
156,
30
] | python | en | ['en', 'error', 'th'] | False |
get_parameter_value | (
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) |
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. Moreover, if the parameter_reference argument is an object of type "dict",
it will recursively detect values using the fully-qualified parameter name format and evaluate them accordingly.
|
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. Moreover, if the parameter_reference argument is an object of type "dict",
it will recursively detect values using the fully-qualified parameter name format and evaluate them accordingly.
| def get_parameter_value(
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
"""
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. Moreover, if the parameter_reference argument is an object of type "dict",
it will recursively detect values using the fully-qualified parameter name format and evaluate them accordingly.
"""
if isinstance(parameter_reference, dict):
for key, value in parameter_reference.items():
parameter_reference[key] = get_parameter_value(
domain=domain,
parameter_reference=value,
variables=variables,
parameters=parameters,
)
elif isinstance(parameter_reference, str) and parameter_reference.startswith("$"):
parameter_reference = get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name=parameter_reference,
domain=domain,
variables=variables,
parameters=parameters,
)
if isinstance(parameter_reference, dict):
for key, value in parameter_reference.items():
parameter_reference[key] = get_parameter_value(
domain=domain,
parameter_reference=value,
variables=variables,
parameters=parameters,
)
return parameter_reference | [
"def",
"get_parameter_value",
"(",
"domain",
":",
"Optional",
"[",
"Domain",
"]",
"=",
"None",
",",
"parameter_reference",
":",
"Optional",
"[",
"Union",
"[",
"Any",
",",
"str",
"]",
"]",
"=",
"None",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
"parameters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"ParameterContainer",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Optional",
"[",
"Any",
"]",
":",
"if",
"isinstance",
"(",
"parameter_reference",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"parameter_reference",
".",
"items",
"(",
")",
":",
"parameter_reference",
"[",
"key",
"]",
"=",
"get_parameter_value",
"(",
"domain",
"=",
"domain",
",",
"parameter_reference",
"=",
"value",
",",
"variables",
"=",
"variables",
",",
"parameters",
"=",
"parameters",
",",
")",
"elif",
"isinstance",
"(",
"parameter_reference",
",",
"str",
")",
"and",
"parameter_reference",
".",
"startswith",
"(",
"\"$\"",
")",
":",
"parameter_reference",
"=",
"get_parameter_value_by_fully_qualified_parameter_name",
"(",
"fully_qualified_parameter_name",
"=",
"parameter_reference",
",",
"domain",
"=",
"domain",
",",
"variables",
"=",
"variables",
",",
"parameters",
"=",
"parameters",
",",
")",
"if",
"isinstance",
"(",
"parameter_reference",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"parameter_reference",
".",
"items",
"(",
")",
":",
"parameter_reference",
"[",
"key",
"]",
"=",
"get_parameter_value",
"(",
"domain",
"=",
"domain",
",",
"parameter_reference",
"=",
"value",
",",
"variables",
"=",
"variables",
",",
"parameters",
"=",
"parameters",
",",
")",
"return",
"parameter_reference"
] | [
159,
0
] | [
193,
30
] | python | en | ['en', 'error', 'th'] | False |
impute_missing_data_1D | (data1D) |
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many linear plotting functions for 1D data often
(and should) only connect contiguous, non-nan data points. This leaves gaps in the
piecewise linear plot, which are sometimes graphically undesirable.
Parameters
----------
data1D: numpy.ndarray
A 1D NumPy array for which missing values are to be masked or imputed
suitably for at least matplotlib plotting. If formatting for other libraries such
as seaborn or plotly is necessary, add that formatting requirement as a parameter.
|
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many linear plotting functions for 1D data often
(and should) only connect contiguous, non-nan data points. This leaves gaps in the
piecewise linear plot, which are sometimes graphically undesirable. | def impute_missing_data_1D(data1D):
"""
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many linear plotting functions for 1D data often
(and should) only connect contiguous, non-nan data points. This leaves gaps in the
piecewise linear plot, which are sometimes graphically undesirable.
Parameters
----------
data1D: numpy.ndarray
A 1D NumPy array for which missing values are to be masked or imputed
suitably for at least matplotlib plotting. If formatting for other libraries such
as seaborn or plotly is necessary, add that formatting requirement as a parameter.
"""
nan_mask = ~np.isnan(data1D)
x = np.arange(len(data1D))
x_no_nan = x[nan_mask]
data_no_nan = data1D[nan_mask]
if len(x_no_nan) >= 2:
f = interp1d(x_no_nan, data_no_nan)
# Select points for interpolation.
interpolation_x_mask = (x_no_nan[0] <= x) & (x <= x_no_nan[-1])
interpolation_x = x[interpolation_x_mask]
data1D_interp = np.arange(len(data1D), dtype=np.float32)
# The ends of data1D may contain NaNs that must be included.
end_nan_inds = x[(x <= x_no_nan[0]) | (x_no_nan[-1] <= x)]
data1D_interp[end_nan_inds] = np.nan
data1D_interp[interpolation_x_mask] = f(interpolation_x)
return data1D_interp
else: # Cannot interpolate with a single non-nan point.
return data1D | [
"def",
"impute_missing_data_1D",
"(",
"data1D",
")",
":",
"nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"data1D",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"data1D",
")",
")",
"x_no_nan",
"=",
"x",
"[",
"nan_mask",
"]",
"data_no_nan",
"=",
"data1D",
"[",
"nan_mask",
"]",
"if",
"len",
"(",
"x_no_nan",
")",
">=",
"2",
":",
"f",
"=",
"interp1d",
"(",
"x_no_nan",
",",
"data_no_nan",
")",
"# Select points for interpolation.",
"interpolation_x_mask",
"=",
"(",
"x_no_nan",
"[",
"0",
"]",
"<=",
"x",
")",
"&",
"(",
"x",
"<=",
"x_no_nan",
"[",
"-",
"1",
"]",
")",
"interpolation_x",
"=",
"x",
"[",
"interpolation_x_mask",
"]",
"data1D_interp",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"data1D",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# The ends of data1D may contain NaNs that must be included.",
"end_nan_inds",
"=",
"x",
"[",
"(",
"x",
"<=",
"x_no_nan",
"[",
"0",
"]",
")",
"|",
"(",
"x_no_nan",
"[",
"-",
"1",
"]",
"<=",
"x",
")",
"]",
"data1D_interp",
"[",
"end_nan_inds",
"]",
"=",
"np",
".",
"nan",
"data1D_interp",
"[",
"interpolation_x_mask",
"]",
"=",
"f",
"(",
"interpolation_x",
")",
"return",
"data1D_interp",
"else",
":",
"# Cannot interpolate with a single non-nan point.",
"return",
"data1D"
] | [
33,
0
] | [
64,
21
] | python | en | ['en', 'error', 'th'] | False |
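
A quick check of the behaviour described above: interior NaNs are filled by linear interpolation while NaNs at either end are left in place:

import numpy as np

series = np.array([np.nan, 1.0, np.nan, 3.0, np.nan], dtype=np.float32)
print(impute_missing_data_1D(series))   # -> [nan  1.  2.  3. nan]
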
np_dt64_to_str | (np_datetime, fmt='%Y-%m-%d') | Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime. | Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime. | def np_dt64_to_str(np_datetime, fmt='%Y-%m-%d'):
"""Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime."""
return pd.to_datetime(str(np_datetime)).strftime(fmt) | [
"def",
"np_dt64_to_str",
"(",
"np_datetime",
",",
"fmt",
"=",
"'%Y-%m-%d'",
")",
":",
"return",
"pd",
".",
"to_datetime",
"(",
"str",
"(",
"np_datetime",
")",
")",
".",
"strftime",
"(",
"fmt",
")"
] | [
77,
0
] | [
79,
57
] | python | en | ['en', 'en', 'en'] | True |
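
Example of the conversion above, assuming numpy and pandas are available:

import numpy as np

timestamp = np.datetime64("2017-06-15T12:00:00")
print(np_dt64_to_str(timestamp))                  # -> 2017-06-15
print(np_dt64_to_str(timestamp, fmt="%Y/%m/%d"))  # -> 2017/06/15
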
xarray_plot_data_vars_over_time | (dataset, colors=['orange', 'blue']) |
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot. The only dimension and coordinate must be 'time'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
|
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes. | def xarray_plot_data_vars_over_time(dataset, colors=['orange', 'blue']):
"""
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot. The only dimension and coordinate must be 'time'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
"""
data_var_names = sorted(list(dataset.data_vars))
len_dataset = dataset.time.size
nan_mask = np.full(len_dataset, True)
for i, data_arr_name in enumerate(data_var_names):
data_arr = dataset[data_arr_name]
nan_mask = nan_mask & data_arr.notnull().values
plt.plot(data_arr[nan_mask], marker='o', c=colors[i])
times = dataset.time.values
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times)))
plt.xticks(np.arange(len(date_strs[nan_mask])), date_strs[nan_mask],
rotation=45, ha='right', rotation_mode='anchor')
plt.legend(data_var_names, loc='upper right')
plt.show() | [
"def",
"xarray_plot_data_vars_over_time",
"(",
"dataset",
",",
"colors",
"=",
"[",
"'orange'",
",",
"'blue'",
"]",
")",
":",
"data_var_names",
"=",
"sorted",
"(",
"list",
"(",
"dataset",
".",
"data_vars",
")",
")",
"len_dataset",
"=",
"dataset",
".",
"time",
".",
"size",
"nan_mask",
"=",
"np",
".",
"full",
"(",
"len_dataset",
",",
"True",
")",
"for",
"i",
",",
"data_arr_name",
"in",
"enumerate",
"(",
"data_var_names",
")",
":",
"data_arr",
"=",
"dataset",
"[",
"data_arr_name",
"]",
"nan_mask",
"=",
"nan_mask",
"&",
"data_arr",
".",
"notnull",
"(",
")",
".",
"values",
"plt",
".",
"plot",
"(",
"data_arr",
"[",
"nan_mask",
"]",
",",
"marker",
"=",
"'o'",
",",
"c",
"=",
"colors",
"[",
"i",
"]",
")",
"times",
"=",
"dataset",
".",
"time",
".",
"values",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"times",
")",
")",
")",
"plt",
".",
"xticks",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"date_strs",
"[",
"nan_mask",
"]",
")",
")",
",",
"date_strs",
"[",
"nan_mask",
"]",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"plt",
".",
"legend",
"(",
"data_var_names",
",",
"loc",
"=",
"'upper right'",
")",
"plt",
".",
"show",
"(",
")"
] | [
119,
0
] | [
146,
14
] | python | en | ['en', 'error', 'th'] | False |
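
A hedged sketch of calling the plotting helper above on a small synthetic Dataset whose only dimension is 'time', as the docstring requires; the values are made up for illustration:

import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range("2017-01-01", periods=5)
ds = xr.Dataset(
    {
        "ndvi": ("time", np.array([0.2, np.nan, 0.5, 0.6, 0.4])),
        "wofs": ("time", np.array([0.0, 0.1, np.nan, 0.3, 0.2])),
    },
    coords={"time": times},
)
xarray_plot_data_vars_over_time(ds)   # one line per data variable, NaN timesteps dropped
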
xarray_scatterplot_data_vars | (dataset, figure_kwargs={'figsize': (12, 6)}, colors=['blue', 'orange'], markersize=5) |
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot.
frac_dates: float
The fraction of dates to label on the x-axis.
figure_kwargs: dict
A dictionary of kwargs for matplotlib figure creation.
colors: list
A list of strings denoting abbreviated colors for each data variable's points.
For example, 'r' is red and 'b' is blue.
markersize: float
The size of markers in the scatterplot.
:Authors:
John Rattz ([email protected])
|
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis. | def xarray_scatterplot_data_vars(dataset, figure_kwargs={'figsize': (12, 6)}, colors=['blue', 'orange'], markersize=5):
"""
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot.
frac_dates: float
The fraction of dates to label on the x-axis.
figure_kwargs: dict
A dictionary of kwargs for matplotlib figure creation.
colors: list
A list of strings denoting abbreviated colors for each data variable's points.
For example, 'r' is red and 'b' is blue.
markersize: float
The size of markers in the scatterplot.
:Authors:
John Rattz ([email protected])
"""
plt.figure(**figure_kwargs)
data_var_names = list(dataset.data_vars)
len_dataset = dataset.time.size
nan_mask = np.full(len_dataset, True)
for i, data_arr in enumerate(dataset.data_vars.values()):
if len(list(dataset.dims)) > 1:
dims_to_check_for_nulls = [dim for dim in list(dataset.dims) if dim != 'time']
nan_mask = nan_mask & data_arr.notnull().any(dim=dims_to_check_for_nulls).values
else:
nan_mask = data_arr.notnull().values
times = data_arr.to_dataframe().index.get_level_values('time').values
plt.scatter(stats.rankdata(times, method='dense') - 1, data_arr.values.flatten(), c=colors[i], s=markersize)
unique_times = dataset.time.values
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), unique_times)))
plt.xticks(np.arange(len(date_strs))[nan_mask], date_strs[nan_mask],
rotation=45, ha='right', rotation_mode='anchor')
plt.xlabel('time')
plt.legend(data_var_names, loc='upper right')
plt.show() | [
"def",
"xarray_scatterplot_data_vars",
"(",
"dataset",
",",
"figure_kwargs",
"=",
"{",
"'figsize'",
":",
"(",
"12",
",",
"6",
")",
"}",
",",
"colors",
"=",
"[",
"'blue'",
",",
"'orange'",
"]",
",",
"markersize",
"=",
"5",
")",
":",
"plt",
".",
"figure",
"(",
"*",
"*",
"figure_kwargs",
")",
"data_var_names",
"=",
"list",
"(",
"dataset",
".",
"data_vars",
")",
"len_dataset",
"=",
"dataset",
".",
"time",
".",
"size",
"nan_mask",
"=",
"np",
".",
"full",
"(",
"len_dataset",
",",
"True",
")",
"for",
"i",
",",
"data_arr",
"in",
"enumerate",
"(",
"dataset",
".",
"data_vars",
".",
"values",
"(",
")",
")",
":",
"if",
"len",
"(",
"list",
"(",
"dataset",
".",
"dims",
")",
")",
">",
"1",
":",
"dims_to_check_for_nulls",
"=",
"[",
"dim",
"for",
"dim",
"in",
"list",
"(",
"dataset",
".",
"dims",
")",
"if",
"dim",
"!=",
"'time'",
"]",
"nan_mask",
"=",
"nan_mask",
"&",
"data_arr",
".",
"notnull",
"(",
")",
".",
"any",
"(",
"dim",
"=",
"dims_to_check_for_nulls",
")",
".",
"values",
"else",
":",
"nan_mask",
"=",
"data_arr",
".",
"notnull",
"(",
")",
".",
"values",
"times",
"=",
"data_arr",
".",
"to_dataframe",
"(",
")",
".",
"index",
".",
"get_level_values",
"(",
"'time'",
")",
".",
"values",
"plt",
".",
"scatter",
"(",
"stats",
".",
"rankdata",
"(",
"times",
",",
"method",
"=",
"'dense'",
")",
"-",
"1",
",",
"data_arr",
".",
"values",
".",
"flatten",
"(",
")",
",",
"c",
"=",
"colors",
"[",
"i",
"]",
",",
"s",
"=",
"markersize",
")",
"unique_times",
"=",
"dataset",
".",
"time",
".",
"values",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"unique_times",
")",
")",
")",
"plt",
".",
"xticks",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"date_strs",
")",
")",
"[",
"nan_mask",
"]",
",",
"date_strs",
"[",
"nan_mask",
"]",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"plt",
".",
"xlabel",
"(",
"'time'",
")",
"plt",
".",
"legend",
"(",
"data_var_names",
",",
"loc",
"=",
"'upper right'",
")",
"plt",
".",
"show",
"(",
")"
] | [
149,
0
] | [
189,
14
] | python | en | ['en', 'error', 'th'] | False |
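A minimal usage sketch for xarray_scatterplot_data_vars above, assuming a small synthetic dataset with time, latitude, and longitude dimensions; the values and coordinate ranges are placeholders rather than real Data Cube output.

import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range('2018-01-01', periods=6, freq='MS')
lats = np.linspace(-1.00, -0.95, 4)
lons = np.linspace(35.00, 35.05, 4)
shape = (len(times), len(lats), len(lons))
# Random stand-ins for NDVI and a water classification.
dataset = xr.Dataset(
    {'ndvi': (('time', 'latitude', 'longitude'), np.random.uniform(0, 1, shape)),
     'wofs': (('time', 'latitude', 'longitude'), np.random.randint(0, 2, shape).astype(float))},
    coords={'time': times, 'latitude': lats, 'longitude': lons})
xarray_scatterplot_data_vars(dataset, figure_kwargs={'figsize': (12, 6)}, markersize=5)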
xarray_plot_ndvi_boxplot_wofs_lineplot_over_time | (dataset, resolution=None, colors=['orange', 'blue']) |
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time.
Parameters
----------
dataset: xarray.Dataset
A Dataset formatted as follows:
coordinates: time, latitude, longitude.
data variables: ndvi, wofs
resolution: str
Denotes the resolution of aggregation. Only options are None or 'weekly'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
|
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time. | def xarray_plot_ndvi_boxplot_wofs_lineplot_over_time(dataset, resolution=None, colors=['orange', 'blue']):
"""
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time.
Parameters
----------
dataset: xarray.Dataset
A Dataset formatted as follows:
coordinates: time, latitude, longitude.
data variables: ndvi, wofs
resolution: str
Denotes the resolution of aggregation. Only options are None or 'weekly'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
"""
plotting_data = dataset.stack(lat_lon=('latitude', 'longitude'))
time_agg_str = 'weekofyear' if resolution is not None and resolution == 'weekly' else 'time'
if time_agg_str != 'time':
plotting_data = plotting_data.groupby('time.' + time_agg_str).mean(dim='time')
fig, ax = plt.subplots(figsize=(9, 6))
ndvi_box_color, wofs_line_color = colors
times = plotting_data[time_agg_str].values
# NDVI boxplot boxes
# The data formatted for matplotlib.pyplot.boxplot().
ndvi_formatted_data = xr.DataArray(np.full_like(plotting_data.ndvi.values, np.nan))
for i, time in enumerate(times):
ndvi_formatted_data.loc[i, :] = plotting_data.loc[{time_agg_str: time}].ndvi.values
ndvi_nan_mask = ~np.isnan(ndvi_formatted_data)
filtered_formatted_data = [] # Data formatted for matplotlib.pyplot.boxplot().
acq_inds_to_keep = [] # Indices of acquisitions to keep. Other indices contain all nan values.
for i, (d, m) in enumerate(zip(ndvi_formatted_data, ndvi_nan_mask)):
if len(d[m]) != 0:
filtered_formatted_data.append(d[m])
acq_inds_to_keep.append(i)
times_no_nan = times[acq_inds_to_keep]
epochs = np.array(list(map(n64_to_epoch, times_no_nan))) if time_agg_str == 'time' else None
x_locs = epochs if time_agg_str == 'time' else times_no_nan
box_width = 0.5 * np.min(np.diff(x_locs))
bp = ax.boxplot(filtered_formatted_data, widths=[box_width] * len(filtered_formatted_data),
positions=x_locs, patch_artist=True, boxprops=dict(facecolor=ndvi_box_color),
flierprops=dict(marker='o', markersize=0.25),
manage_xticks=False) # `manage_xticks=False` to avoid excessive padding on the x-axis.
# WOFS line
wofs_formatted_data = xr.DataArray(np.full_like(plotting_data.wofs.values, np.nan))
for i, time in enumerate(times):
wofs_formatted_data.loc[i, :] = plotting_data.loc[{time_agg_str: time}].wofs.values
wofs_line_plot_data = np.nanmean(wofs_formatted_data.values, axis=1)
wofs_nan_mask = ~np.isnan(wofs_line_plot_data)
line = ax.plot(x_locs, wofs_line_plot_data[wofs_nan_mask], c=wofs_line_color)
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times_no_nan))) if time_agg_str == 'time' else \
naive_months_ticks_by_week(times_no_nan)
x_labels = date_strs
plt.xticks(x_locs, x_labels, rotation=45, ha='right', rotation_mode='anchor')
plt.legend(handles=[bp['boxes'][0], line[0]], labels=list(plotting_data.data_vars), loc='best')
plt.tight_layout()
plt.show() | [
"def",
"xarray_plot_ndvi_boxplot_wofs_lineplot_over_time",
"(",
"dataset",
",",
"resolution",
"=",
"None",
",",
"colors",
"=",
"[",
"'orange'",
",",
"'blue'",
"]",
")",
":",
"plotting_data",
"=",
"dataset",
".",
"stack",
"(",
"lat_lon",
"=",
"(",
"'latitude'",
",",
"'longitude'",
")",
")",
"time_agg_str",
"=",
"'weekofyear'",
"if",
"resolution",
"is",
"not",
"None",
"and",
"resolution",
"==",
"'weekly'",
"else",
"'time'",
"if",
"time_agg_str",
"!=",
"'time'",
":",
"plotting_data",
"=",
"plotting_data",
".",
"groupby",
"(",
"'time.'",
"+",
"time_agg_str",
")",
".",
"mean",
"(",
"dim",
"=",
"'time'",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"9",
",",
"6",
")",
")",
"ndvi_box_color",
",",
"wofs_line_color",
"=",
"(",
"'orange'",
",",
"'blue'",
")",
"times",
"=",
"plotting_data",
"[",
"time_agg_str",
"]",
".",
"values",
"# NDVI boxplot boxes",
"# The data formatted for matplotlib.pyplot.boxplot().",
"ndvi_formatted_data",
"=",
"xr",
".",
"DataArray",
"(",
"np",
".",
"full_like",
"(",
"plotting_data",
".",
"ndvi",
".",
"values",
",",
"np",
".",
"nan",
")",
")",
"for",
"i",
",",
"time",
"in",
"enumerate",
"(",
"times",
")",
":",
"ndvi_formatted_data",
".",
"loc",
"[",
"i",
",",
":",
"]",
"=",
"plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"time",
"}",
"]",
".",
"ndvi",
".",
"values",
"ndvi_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"ndvi_formatted_data",
")",
"filtered_formatted_data",
"=",
"[",
"]",
"# Data formatted for matplotlib.pyplot.boxplot().",
"acq_inds_to_keep",
"=",
"[",
"]",
"# Indices of acquisitions to keep. Other indicies contain all nan values.",
"for",
"i",
",",
"(",
"d",
",",
"m",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"ndvi_formatted_data",
",",
"ndvi_nan_mask",
")",
")",
":",
"if",
"len",
"(",
"d",
"[",
"m",
"]",
"!=",
"0",
")",
":",
"filtered_formatted_data",
".",
"append",
"(",
"d",
"[",
"m",
"]",
")",
"acq_inds_to_keep",
".",
"append",
"(",
"i",
")",
"times_no_nan",
"=",
"times",
"[",
"acq_inds_to_keep",
"]",
"epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"times_no_nan",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"None",
"x_locs",
"=",
"epochs",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"times_no_nan",
"box_width",
"=",
"0.5",
"*",
"np",
".",
"min",
"(",
"np",
".",
"diff",
"(",
"x_locs",
")",
")",
"bp",
"=",
"ax",
".",
"boxplot",
"(",
"filtered_formatted_data",
",",
"widths",
"=",
"[",
"box_width",
"]",
"*",
"len",
"(",
"filtered_formatted_data",
")",
",",
"positions",
"=",
"x_locs",
",",
"patch_artist",
"=",
"True",
",",
"boxprops",
"=",
"dict",
"(",
"facecolor",
"=",
"ndvi_box_color",
")",
",",
"flierprops",
"=",
"dict",
"(",
"marker",
"=",
"'o'",
",",
"markersize",
"=",
"0.25",
")",
",",
"manage_xticks",
"=",
"False",
")",
"# `manage_xticks=False` to avoid excessive padding on the x-axis.",
"# WOFS line",
"wofs_formatted_data",
"=",
"xr",
".",
"DataArray",
"(",
"np",
".",
"full_like",
"(",
"plotting_data",
".",
"wofs",
".",
"values",
",",
"np",
".",
"nan",
")",
")",
"for",
"i",
",",
"time",
"in",
"enumerate",
"(",
"times",
")",
":",
"wofs_formatted_data",
".",
"loc",
"[",
"i",
",",
":",
"]",
"=",
"plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"time",
"}",
"]",
".",
"wofs",
".",
"values",
"wofs_line_plot_data",
"=",
"np",
".",
"nanmean",
"(",
"wofs_formatted_data",
".",
"values",
",",
"axis",
"=",
"1",
")",
"wofs_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"wofs_line_plot_data",
")",
"line",
"=",
"ax",
".",
"plot",
"(",
"x_locs",
",",
"wofs_line_plot_data",
"[",
"wofs_nan_mask",
"]",
",",
"c",
"=",
"wofs_line_color",
")",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"times_no_nan",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"naive_months_ticks_by_week",
"(",
"times_no_nan",
")",
"x_labels",
"=",
"date_strs",
"plt",
".",
"xticks",
"(",
"x_locs",
",",
"x_labels",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"plt",
".",
"legend",
"(",
"handles",
"=",
"[",
"bp",
"[",
"'boxes'",
"]",
"[",
"0",
"]",
",",
"line",
"[",
"0",
"]",
"]",
",",
"labels",
"=",
"list",
"(",
"plotting_data",
".",
"data_vars",
")",
",",
"loc",
"=",
"'best'",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"show",
"(",
")"
] | [
192,
0
] | [
255,
14
] | python | en | ['en', 'error', 'th'] | False |
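A minimal usage sketch for xarray_plot_ndvi_boxplot_wofs_lineplot_over_time above. It assumes a dataset shaped as the docstring requires (time/latitude/longitude coordinates with 'ndvi' and 'wofs' data variables); the synthetic values below are placeholders.

import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range('2018-01-01', periods=10, freq='W')
lats = np.linspace(-1.00, -0.95, 5)
lons = np.linspace(35.00, 35.05, 5)
shape = (len(times), len(lats), len(lons))
dataset = xr.Dataset(
    {'ndvi': (('time', 'latitude', 'longitude'), np.random.uniform(0, 1, shape)),
     'wofs': (('time', 'latitude', 'longitude'), np.random.randint(0, 2, shape).astype(float))},
    coords={'time': times, 'latitude': lats, 'longitude': lons})
# Per-acquisition NDVI boxplots with a line of mean WOFS; pass resolution='weekly' to bin by week of year.
xarray_plot_ndvi_boxplot_wofs_lineplot_over_time(dataset, resolution=None)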
xarray_time_series_plot | (dataset, plot_descs, x_coord='longitude',
y_coord='latitude', fig_params=None,
fig=None, ax=None, show_legend=True, title=None,
max_times_per_plot=None, max_cols=1) |
Plot data variables in an xarray.Dataset together in one figure, with different
plot types for each (e.g. box-and-whisker plot, line plot, scatter plot), and
optional curve fitting to aggregations along time. Handles data binned with
xarray.Dataset methods resample() and groupby(). That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year).
Parameters
-----------
dataset: xarray.Dataset
A Dataset containing some bands like NDVI or WOFS.
It must have time, x, and y coordinates with names specified by
the 'x_coord' and 'y_coord' parameters.
plot_descs: dict
Dictionary mapping names of DataArrays in the Dataset to plot to
dictionaries mapping aggregation types (e.g. 'mean', 'median') to
lists of dictionaries mapping plot types
(e.g. 'line', 'box', 'scatter') to keyword arguments for plotting.
Aggregation happens within time slices and can be many-to-many or many-to-one.
Some plot types require many-to-many aggregation (e.g. 'none'), and some other plot types
require many-to-one aggregation (e.g. 'mean'). Aggregation types can be any of
['min', 'mean', 'median', 'none', 'max'], with 'none' performing no aggregation.
Plot types can be any of
['scatter', 'line', 'box', 'gaussian', 'gaussian_filter', 'poly', 'cubic_spline', 'fourier'].
Here are the required arguments, with format {plot_type: {arg_name: (data_type[, description])}}:
{'poly':
{'degree': (int, "the degree of the polynomial to fit.")}}
Here are the optional arguments, with format {plot_type: {arg_name: (data_type[, description])}}:
{'box': # See matplotlib.axes.Axes.boxplot() for more information.
{'boxprops': dict, 'flierprops': dict, 'showfliers': bool},
'gaussian_filter': # See gaussian_filter_fit() in data_cube_utilities/curve_fitting.py for more information.
{'sigma': numeric},
'fourier':
{'extrap_time': (string, "a positive integer followed by Y, M, or D -
year, month, or day - specifying the
amount of time to extrapolate over."),
'extrap_color': (matplotlib color, "a matplotlib color to color the extrapolated data with.")}}
Additionally, all of the curve fits (['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']) support an optional 'smooth' boolean parameter.
If true, the curve fit is smoothed, otherwise it will look no smoother than the original data.
Here is an example:
{'ndvi':{'mean':[{'line':{'color':'forestgreen', 'alpha':alpha}}],
'none':[{'box':{'boxprops':{'facecolor':'forestgreen','alpha':alpha},
'showfliers':False}}]}}
This example will create a green line plot of the mean of the 'ndvi' band
as well as a green box plot of the 'ndvi' band.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset`.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}). Used to create a Figure
if ``fig is None and ax is None``.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first. This
argument is ignored if ``max_times_per_plot`` is less than the number of times.
ax: matplotlib.axes.Axes
The axes to use for the plot. This argument is ignored if
``max_times_per_plot`` is less than the number of times.
show_legend: bool
Whether or not to show the legend.
title: str
The title of each subplot. Note that a date range enclosed in parentheses
will be appended whether this is specified or not.
max_times_per_plot: int
The maximum number of times per plot. If specified, multiple plots may be created,
with each plot having as close to `num_times/max_times_per_plot` number of points
as possible, where `num_times` is the total number of plotting points, including
extrapolations. The plots will be arranged in a row-major grid, with the number
of columns being at most `max_cols`.
max_cols: int
The maximum number of columns in the plot grid.
Returns
-------
fig: matplotlib.figure.Figure
The figure containing the plot grid.
plotting_data: dict
A dictionary mapping 3-tuples of data array names, aggregation types, and plot types
(e.g. ('ndvi', 'none', 'box')) to `xarray.DataArray` objects of the data that was
plotted for those combinations of aggregation types and plot types.
Raises
------
ValueError:
If an aggregation type is not possible for a plot type
:Authors:
John Rattz ([email protected])
|
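A hedged usage sketch for xarray_time_series_plot, reusing the plot_descs pattern from the docstring above. It assumes `dataset` has 'time', 'latitude', and 'longitude' coordinates and 'ndvi'/'wofs' data variables (for example, one built like the synthetic datasets sketched earlier); the colors, alpha, and grouping choices are illustrative, not fixed API values.

plot_descs = {
    'ndvi': {'mean': [{'line': {'color': 'forestgreen', 'alpha': 0.5}}],
             'none': [{'box': {'boxprops': {'facecolor': 'forestgreen', 'alpha': 0.5},
                               'showfliers': False}}]},
    'wofs': {'mean': [{'line': {'color': 'blue', 'alpha': 0.5}}]}}
# A Fourier fit with extrapolation could be requested instead, e.g.
# 'ndvi': {'mean': [{'fourier': {'n_harm': 3, 'extrap_time': '6M', 'extrap_color': 'red'}}]}
fig, plotted_data = xarray_time_series_plot(dataset, plot_descs,
                                            x_coord='longitude', y_coord='latitude',
                                            title='NDVI and WOFS', max_times_per_plot=12)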
Plot data variables in an xarray.Dataset together in one figure, with different
plot types for each (e.g. box-and-whisker plot, line plot, scatter plot), and
optional curve fitting to aggregations along time. Handles data binned with
xarray.Dataset methods resample() and groupby(). That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year). | def xarray_time_series_plot(dataset, plot_descs, x_coord='longitude',
y_coord='latitude', fig_params=None,
fig=None, ax=None, show_legend=True, title=None,
max_times_per_plot=None, max_cols=1):
"""
Plot data variables in an xarray.Dataset together in one figure, with different
plot types for each (e.g. box-and-whisker plot, line plot, scatter plot), and
optional curve fitting to aggregations along time. Handles data binned with
xarray.Dataset methods resample() and groupby(). That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year).
Parameters
-----------
dataset: xarray.Dataset
A Dataset containing some bands like NDVI or WOFS.
It must have time, x, and y coordinates with names specified by
the 'x_coord' and 'y_coord' parameters.
plot_descs: dict
Dictionary mapping names of DataArrays in the Dataset to plot to
dictionaries mapping aggregation types (e.g. 'mean', 'median') to
lists of dictionaries mapping plot types
(e.g. 'line', 'box', 'scatter') to keyword arguments for plotting.
Aggregation happens within time slices and can be many-to-many or many-to-one.
Some plot types require many-to-many aggregation (e.g. 'none'), and some other plot types
require many-to-one aggregation (e.g. 'mean'). Aggregation types can be any of
['min', 'mean', 'median', 'none', 'max'], with 'none' performing no aggregation.
Plot types can be any of
['scatter', 'line', 'box', 'gaussian', 'gaussian_filter', 'poly', 'cubic_spline', 'fourier'].
Here are the required arguments, with format {plot_type: {arg_name: (data_type[, description])}}:
{'poly':
{'degree': (int, "the degree of the polynomial to fit.")}}
Here are the optional arguments, with format {plot_type: {arg_name: (data_type[, description])}}:
{'box': # See matplotlib.axes.Axes.boxplot() for more information.
{'boxprops': dict, 'flierprops': dict, 'showfliers': bool},
'gaussian_filter': # See gaussian_filter_fit() in data_cube_utilities/curve_fitting.py for more information.
{'sigma': numeric},
'fourier':
{'extrap_time': (string, "a positive integer followed by Y, M, or D -
year, month, or day - specifying the
amount of time to extrapolate over."),
'extrap_color': (matplotlib color, "a matplotlib color to color the extrapolated data with.")}}
Additionally, all of the curve fits (['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']) support an optional 'smooth' boolean parameter.
If true, the curve fit is smoothed, otherwise it will look no smoother than the original data.
Here is an example:
{'ndvi':{'mean':[{'line':{'color':'forestgreen', 'alpha':alpha}}],
'none':[{'box':{'boxprops':{'facecolor':'forestgreen','alpha':alpha},
'showfliers':False}}]}}
This example will create a green line plot of the mean of the 'ndvi' band
as well as a green box plot of the 'ndvi' band.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset`.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}). Used to create a Figure
if ``fig is None and ax is None``.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first. This
argument is ignored if ``max_times_per_plot`` is less than the number of times.
ax: matplotlib.axes.Axes
The axes to use for the plot. This argument is ignored if
``max_times_per_plot`` is less than the number of times.
show_legend: bool
Whether or not to show the legend.
title: str
The title of each subplot. Note that a date range enclosed in parentheses
will be appended whether this is specified or not.
max_times_per_plot: int
The maximum number of times per plot. If specified, multiple plots may be created,
with each plot having as close to `num_times/max_times_per_plot` number of points
as possible, where `num_times` is the total number of plotting points, including
extrapolations. The plots will be arranged in a row-major grid, with the number
of columns being at most `max_cols`.
max_cols: int
The maximum number of columns in the plot grid.
Returns
-------
fig: matplotlib.figure.Figure
The figure containing the plot grid.
plotting_data: dict
A dictionary mapping 3-tuples of data array names, aggregation types, and plot types
(e.g. ('ndvi', 'none', 'box')) to `xarray.DataArray` objects of the data that was
plotted for those combinations of aggregation types and plot types.
Raises
------
ValueError:
If an aggregation type is not possible for a plot type
:Authors:
John Rattz ([email protected])
"""
fig_params = {} if fig_params is None else fig_params
# Lists of plot types that can and cannot accept many-to-one aggregation
# for each time slice, as well as plot types that support extrapolation.
plot_types_requiring_aggregation = ['line', 'gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']
plot_types_handling_aggregation = ['scatter'] + plot_types_requiring_aggregation
plot_types_not_handling_aggregation = ['box']
plot_types_curve_fit = ['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']
plot_types_supporting_extrapolation = ['fourier']
all_plot_types = list(set(plot_types_requiring_aggregation + plot_types_handling_aggregation + \
plot_types_not_handling_aggregation + plot_types_curve_fit + \
plot_types_supporting_extrapolation))
# Aggregation types that aggregate all values for a given time to one value.
many_to_one_agg_types = ['min', 'mean', 'median', 'max']
# Aggregation types that aggregate to many values or do not aggregate.
many_to_many_agg_types = ['none']
all_agg_types = many_to_one_agg_types + many_to_many_agg_types
# Determine how the data was aggregated, if at all.
possible_time_agg_strs = ['time', 'week', 'month']
time_agg_str = 'time'
for possible_time_agg_str in possible_time_agg_strs:
if possible_time_agg_str in list(dataset.coords):
time_agg_str = possible_time_agg_str
break
# Make the data 2D - time and a stack of all other dimensions.
all_plotting_data_arrs = list(plot_descs.keys())
all_plotting_data = dataset[all_plotting_data_arrs]
all_times = all_plotting_data[time_agg_str].values
# Mask out times for which no data variable to plot has any non-NaN data.
nan_mask_data_vars = list(all_plotting_data[all_plotting_data_arrs] \
.notnull().data_vars.values())
for i, data_var in enumerate(nan_mask_data_vars):
time_nan_mask = data_var if i == 0 else time_nan_mask | data_var
time_nan_mask = time_nan_mask.any([x_coord, y_coord])
times_not_all_nan = all_times[time_nan_mask.values]
non_nan_plotting_data = all_plotting_data.loc[{time_agg_str: times_not_all_nan}]
# Determine the number of extrapolation data points. #
extrap_day_range = 0
n_extrap_pts = 0
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for i, plot_dict in enumerate(plot_dicts):
for plot_type, plot_kwargs in plot_dict.items():
# Only check the plot types supporting extrapolation.
if plot_type == 'fourier':
curr_extrap_day_range = 0
n_predict = 0 # Default to no extrapolation.
# Addressing this way to modify `plot_descs`.
extrap_time = plot_kwargs.get('extrap_time', None)
if extrap_time is not None:
assert time_agg_str == 'time', \
"Extrapolating for data with a time dimension other than 'time' - " \
"such as 'month', or 'week' - is not supported. A time dimension of 'month' " \
"or 'week' denotes data aggregated for each month or week across years, so " \
"extrapolation is meaningless in that case. Support for a time dimension of 'year' " \
"has not yet been added."
# Determine the number of points to extrapolate (in an approximate manner).
# First find the time range of the given data.
first_last_days = list(map(lambda np_dt_64: _n64_to_datetime(np_dt_64),
non_nan_plotting_data.time.values[[0, -1]]))
year_range = first_last_days[1].year - first_last_days[0].year
month_range = first_last_days[1].month - first_last_days[0].month
day_range = first_last_days[1].day - first_last_days[0].day
day_range = year_range * 365.25 + month_range * 30 + day_range
# Then find the time range of the extrapolation string.
fields = re.match(r"(?P<num>[0-9]{0,5})(?P<unit>[YMD])", extrap_time)
assert fields is not None, \
r"For the '{}' DataArray: When using 'fourier' as " \
"the fit type, if the 'extrap_time' parameter is supplied, it must be " \
"a string containing a positive integer followed by one of ['Y', 'M', or 'D']." \
.format(data_arr_name)
num, unit = int(fields['num']), fields['unit']
days_per_unit = dict(Y=365.25, M=30, D=1)[unit]
curr_extrap_day_range = num * days_per_unit
n_predict = round(len(non_nan_plotting_data[time_agg_str]) *
(curr_extrap_day_range / day_range))
plot_descs[data_arr_name][agg_type][i][plot_type] \
['n_predict'] = n_predict
# This parameter is used by get_curvefit() later.
extrap_day_range = max(extrap_day_range, curr_extrap_day_range)
n_extrap_pts = max(n_extrap_pts, n_predict)
# Collect (1) the times not containing only NaN values and (2) the extrapolation times.
if time_agg_str == 'time' and len(times_not_all_nan) > 0:
first_extrap_time = times_not_all_nan[-1] + np.timedelta64(extrap_day_range, 'D') / n_extrap_pts
last_extrap_time = times_not_all_nan[-1] + np.timedelta64(extrap_day_range, 'D')
extrap_times = np.linspace(_n64_datetime_to_scalar(first_extrap_time),
_n64_datetime_to_scalar(last_extrap_time), num=n_extrap_pts)
extrap_times = np.array(list(map(_scalar_to_n64_datetime, extrap_times)))
times_not_all_nan_and_extrap = np.concatenate((times_not_all_nan, extrap_times)) \
if len(extrap_times) > 0 else times_not_all_nan
else:
times_not_all_nan_and_extrap = times_not_all_nan
# Compute all of the plotting data - handling aggregations and extrapolations.
plotting_data_not_nan_and_extrap = {} # Maps data array names to plotting data (NumPy arrays).
# Get the x locations of data points not filled with NaNs and the x locations of extrapolation points.
epochs = np.array(list(map(n64_to_epoch, times_not_all_nan_and_extrap))) \
if time_agg_str == 'time' else times_not_all_nan_and_extrap
epochs_not_extrap = epochs[:len(times_not_all_nan)]
# Handle aggregations and curve fits. #
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
data_arr_plotting_data = non_nan_plotting_data[data_arr_name]
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for i, plot_dict in enumerate(plot_dicts):
for plot_type, plot_kwargs in plot_dict.items():
assert plot_type in all_plot_types, \
r"For the '{}' DataArray: plot_type '{}' not recognized" \
.format(data_arr_name, plot_type)
# Ensure aggregation types are legal for this data.
# Some plot types require aggregation.
if plot_type in plot_types_requiring_aggregation:
if agg_type not in many_to_one_agg_types:
raise ValueError("For the '{}' DataArray: the plot type "
"'{}' only accepts many-to-one aggregation (currently using '{}'). "
"Please pass any of {} as the aggregation type "
"or change the plot type.".format(data_arr_name, \
plot_type, agg_type,
many_to_one_agg_types))
# Some plot types cannot accept many-to-one aggregation.
if plot_type not in plot_types_handling_aggregation:
if agg_type not in many_to_many_agg_types:
raise ValueError("For the '{}' DataArray: "
"the plot type '{}' only accepts many-to-many aggregation "
"(currently using '{}'). Please pass any of {} as "
"the aggregation type or change the plot type."
.format(data_arr_name, plot_type, agg_type,
many_to_many_agg_types))
# Aggregate if necessary.
y = data_arr_plotting_data
if agg_type == 'min':
y = y.min([x_coord, y_coord])
if agg_type == 'mean':
y = y.mean([x_coord, y_coord])
if agg_type == 'median':
y = y.median([x_coord, y_coord])
if agg_type == 'max':
y = y.max([x_coord, y_coord])
# Handle curve fits.
if plot_type in plot_types_curve_fit:
smooth = plot_kwargs.get('smooth', True)
# Create the curve fit.
x_smooth = None if smooth else epochs_not_extrap
data_arr_epochs, y = get_curvefit(epochs_not_extrap, y.values, fit_type=plot_type,
x_smooth=x_smooth, fit_kwargs=plot_kwargs)
# Convert time stamps to NumPy datetime objects.
data_arr_times = np.array(list(map(_scalar_to_n64_datetime, data_arr_epochs))) \
if time_agg_str == 'time' else data_arr_epochs
# Convert the NumPy array into an xarray DataArray.
coords = {time_agg_str: data_arr_times}
dims = list(coords.keys())
y = xr.DataArray(y, coords=coords, dims=dims)
plotting_data_not_nan_and_extrap[(data_arr_name, agg_type, plot_type)] = y
# Handle the potential for multiple plots.
max_times_per_plot = len(times_not_all_nan_and_extrap) if max_times_per_plot is None else \
max_times_per_plot
num_times = len(times_not_all_nan_and_extrap)
num_plots = int(np.ceil(num_times / max_times_per_plot))
num_times_per_plot = round(num_times / num_plots) if num_plots != 0 else 0
num_cols = min(num_plots, max_cols)
num_rows = int(np.ceil(num_plots / num_cols)) if num_cols != 0 else 0
# Set a reasonable figsize if one is not set in `fig_params`.
fig_params.setdefault('figsize', (12 * num_cols, 6 * num_rows))
fig = plt.figure(**fig_params) if fig is None else fig
# Check if there are no plots to make.
if num_plots == 0:
return fig, plotting_data_not_nan_and_extrap
# Create each plot. #
for time_ind, ax_ind in zip(range(0, len(times_not_all_nan_and_extrap), num_times_per_plot),
range(num_plots)):
# The time bounds of this canvas (or "Axes object" or "plot grid cell").
ax_lower_time_bound_ind, ax_upper_time_bound_ind = \
time_ind, min(time_ind + num_times_per_plot, len(times_not_all_nan_and_extrap))
# Retrieve or create the axes if necessary.
if len(times_not_all_nan_and_extrap) <= num_times_per_plot:
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_params)
else:
ax = fig.add_subplot(num_rows, num_cols, ax_ind + 1)
ax_times_not_all_nan_and_extrap = \
times_not_all_nan_and_extrap[ax_lower_time_bound_ind:ax_upper_time_bound_ind]
ax_time_bounds = ax_times_not_all_nan_and_extrap[[0, -1]]
ax_epochs = epochs[ax_lower_time_bound_ind:ax_upper_time_bound_ind]
ax_x_locs = np_scale(ax_epochs if time_agg_str == 'time' else ax_times_not_all_nan_and_extrap)
# Data variable plots within each plot.
data_arr_plots = []
legend_labels = []
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for plot_dict in plot_dicts:
for plot_type, plot_kwargs in plot_dict.items():
# Determine the legend label for this plot.
plot_type_str = \
{'scatter': 'scatterplot', 'line': 'lineplot',
'box': 'boxplot', 'gaussian': 'gaussian fit',
'gaussian_filter': 'gaussian filter fit',
'poly': 'degree {} polynomial fit',
'cubic_spline': 'cubic spline fit',
'fourier': 'Fourier fit ({} harmonics)'}[plot_type]
if plot_type == 'poly':
assert 'degree' in plot_kwargs, \
r"For the '{}' DataArray: When using 'poly' as " \
"the fit type, the fit kwargs must have 'degree' " \
"specified.".format(data_arr_name)
plot_type_str = plot_type_str.format(
plot_kwargs.get('degree'))
if plot_type == 'fourier':
plot_type_str = plot_type_str.format(
plot_kwargs.get('n_harm', default_fourier_n_harm))
# Legend labels for the non-extrapolation
# and extrapolation segments
plot_type_strs = []
# Remove plot kwargs that are not recognized
# by plotting methods (cause errors).
plot_kwargs = plot_kwargs.copy()
plot_kwargs.pop('extrap_time', None)
plot_kwargs.pop('n_predict', None)
plot_kwargs.pop('smooth', None)
plot_kwargs.pop('degree', None) # 'degree'
plot_kwargs.pop('n_harm', None) # 'fourier'
# Handle default plot kwargs.
if plot_type == 'box':
plot_kwargs.setdefault('boxprops',
dict(facecolor='orange'))
plot_kwargs.setdefault('flierprops',
dict(marker='o', markersize=0.5))
plot_kwargs.setdefault('showfliers', False)
# Retrieve the plotting data.
y = plotting_data_not_nan_and_extrap[
(data_arr_name, agg_type, plot_type)]
y = y.sel({time_agg_str:
slice(ax_time_bounds[0], ax_time_bounds[1])})
# Handle cases of insufficient data for this section of the plot.
not_nat_times = None
if time_agg_str == 'time':
not_nat_times = ~np.isnat(y[time_agg_str])
else:
not_nat_times = ~np.isnan(y[time_agg_str])
num_unique_times_y = len(np.unique(y[time_agg_str].values[not_nat_times]))
if num_unique_times_y == 0: # There is no data.
continue
if num_unique_times_y == 1: # There is 1 data point.
plot_type = 'scatter'
plot_kwargs = {}
data_arr_epochs = \
np.array(list(map(n64_to_epoch, y[time_agg_str].values))) \
if time_agg_str == 'time' else \
ax_times_not_all_nan_and_extrap
data_arr_x_locs = np.interp(data_arr_epochs,
ax_epochs, ax_x_locs)
data_arr_time_bounds = y[time_agg_str].values[[0, -1]]
# Determine if this plotting data includes extrapolated values.
data_arr_non_extrap_time_bounds = None
data_arr_has_non_extrap = \
data_arr_time_bounds[0] < times_not_all_nan[-1]
if data_arr_has_non_extrap:
data_arr_non_extrap_time_bounds = \
[data_arr_time_bounds[0], min(data_arr_time_bounds[1],
times_not_all_nan[-1])]
# Because the data could be smoothed, the last
# non-extrapolation time is the last time before
# or at the last non-extrapolation time
# for the original data.
non_extrap_plot_last_time = data_arr_non_extrap_time_bounds[1]
if num_unique_times_y > 1:
non_extrap_plot_last_time = \
y.sel({time_agg_str: data_arr_non_extrap_time_bounds[1]},
method='ffill')[time_agg_str].values
data_arr_non_extrap_plotting_time_bounds = [data_arr_non_extrap_time_bounds[0],
non_extrap_plot_last_time]
data_arr_extrap_time_bounds = None
data_arr_has_extrap = times_not_all_nan[-1] < data_arr_time_bounds[1]
if data_arr_has_extrap:
data_arr_extrap_time_bounds = [max(data_arr_time_bounds[0],
extrap_times[0]),
data_arr_time_bounds[1]]
# Because the data could be smoothed, the first extrapolation time
# is the first time after the last non-extrapolation time for the original data.
extrap_plot_first_time = \
y.sel({time_agg_str: data_arr_non_extrap_time_bounds[1]},
method='ffill')[time_agg_str].values \
if data_arr_has_non_extrap else \
data_arr_time_bounds[0]
data_arr_extrap_plotting_time_bounds = [extrap_plot_first_time,
data_arr_extrap_time_bounds[1]]
# Separate non-extrapolation and extrapolation data.
if data_arr_has_non_extrap:
data_arr_non_extrap = \
y.sel({time_agg_str: slice(*data_arr_non_extrap_plotting_time_bounds)})
data_arr_non_extrap_epochs = \
np.array(list(map(n64_to_epoch, data_arr_non_extrap[time_agg_str].values))) \
if time_agg_str == 'time' else data_arr_non_extrap[time_agg_str].values
data_arr_non_extrap_x_locs = \
np.interp(data_arr_non_extrap_epochs, ax_epochs, ax_x_locs)
# Format plotting kwargs for the non-extrapolation data.
plot_kwargs_non_extrap = plot_kwargs.copy()
plot_kwargs_non_extrap.pop('extrap_color', None)
if data_arr_has_extrap:
# Include the last non-extrapolation point so the
# non-extrapolation and extrapolation lines connect.
data_arr_extrap = \
y.sel({time_agg_str: slice(*data_arr_extrap_plotting_time_bounds)})
data_arr_extrap_epochs = \
np.array(list(map(n64_to_epoch, data_arr_extrap[time_agg_str].values))) \
if time_agg_str == 'time' else data_arr_extrap[time_agg_str].values
data_arr_extrap_x_locs = \
np.interp(data_arr_extrap_epochs, ax_epochs, ax_x_locs)
# Format plotting kwargs for the extrapolation data.
plot_kwargs_extrap = plot_kwargs.copy()
extrap_color = plot_kwargs_extrap.pop('extrap_color', None)
if extrap_color is not None:
plot_kwargs_extrap['color'] = extrap_color
# Specify non-extrap and extrap plotting args.
if data_arr_has_non_extrap:
plot_args_non_extrap = \
[data_arr_non_extrap_x_locs, data_arr_non_extrap]
if data_arr_has_extrap:
plot_args_extrap = \
[data_arr_extrap_x_locs, data_arr_extrap]
# Actually create the plot.
def create_plot(x_locs, data_arr, **plot_kwargs):
"""
Creates a plot
Parameters
----------
x_locs: xarray.DataArray
A 1D `xarray.DataArray` containing ascending values
in range [0,1], denoting the x locations on the current
canvas at which to plot data with corresponding time
indicies in `data_arr`.
data_arr: xarray.DataArray
An `xarray.DataArray` containing a dimension named
`time_agg_str` (the value of that variable in this context).
Returns
-------
plot_obj: matplotlib.artist.Artist
The plot.
"""
plot_obj = None
if plot_type == 'scatter':
data_arr_dims = list(data_arr.dims)
data_arr_flat = data_arr.stack(flat=data_arr_dims)
plot_obj = ax.scatter(x_locs, data_arr_flat)
elif plot_type in ['line', 'gaussian', 'gaussian_filter',
'poly', 'cubic_spline', 'fourier']:
plot_obj = ax.plot(x_locs, data_arr)[0]
elif plot_type == 'box':
boxplot_nan_mask = ~np.isnan(data_arr)
# Data formatted for matplotlib.pyplot.boxplot().
filtered_formatted_data = []
for i, (d, m) in enumerate(zip(data_arr.values,
boxplot_nan_mask.values)):
if len(d[m]) != 0:
filtered_formatted_data.append(d[m])
box_width = 0.5 * np.min(np.diff(x_locs)) \
if len(x_locs) > 1 else 0.5
# `manage_xticks=False` to avoid excessive padding on x-axis.
bp = ax.boxplot(filtered_formatted_data,
widths=[box_width] * len(filtered_formatted_data),
positions=x_locs, patch_artist=True,
manage_xticks=False, **plot_kwargs)
plot_obj = bp['boxes'][0]
return plot_obj
if data_arr_has_non_extrap:
plot_obj = create_plot(*plot_args_non_extrap, **plot_kwargs_non_extrap)
data_arr_plots.append(plot_obj)
plot_type_strs.append(plot_type_str)
if data_arr_has_extrap and plot_type in plot_types_supporting_extrapolation:
plot_obj = create_plot(*plot_args_extrap, **plot_kwargs_extrap)
data_arr_plots.append(plot_obj)
plot_type_strs.append('extrapolation of ' + plot_type_str)
plot_type_str_suffix = ' of {}'.format(agg_type) if agg_type != 'none' else ''
plot_type_strs = [plot_type_str + plot_type_str_suffix
for plot_type_str in plot_type_strs]
[legend_labels.append('{} of {}'.format(plot_type_str, data_arr_name))
for plot_type_str in plot_type_strs]
# Label the axes and create the legend.
date_strs = \
np.array(list(map(lambda time: np_dt64_to_str(time), ax_times_not_all_nan_and_extrap))) \
if time_agg_str == 'time' else \
naive_months_ticks_by_week(ax_times_not_all_nan_and_extrap) \
if time_agg_str in ['week', 'weekofyear'] else \
month_ints_to_month_names(ax_times_not_all_nan_and_extrap)
plt.xticks(ax_x_locs, date_strs, rotation=45, ha='right', rotation_mode='anchor')
if show_legend:
ax.legend(handles=data_arr_plots, labels=legend_labels, loc='best')
title_postpend = " ({} to {})".format(date_strs[0], date_strs[-1])
title_prepend = "Figure {}".format(ax_ind) if title is None else title
ax.set_title(title_prepend + title_postpend)
return fig, plotting_data_not_nan_and_extrap | [
"def",
"xarray_time_series_plot",
"(",
"dataset",
",",
"plot_descs",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"fig_params",
"=",
"None",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"show_legend",
"=",
"True",
",",
"title",
"=",
"None",
",",
"max_times_per_plot",
"=",
"None",
",",
"max_cols",
"=",
"1",
")",
":",
"fig_params",
"=",
"{",
"}",
"if",
"fig_params",
"is",
"None",
"else",
"fig_params",
"# Lists of plot types that can and cannot accept many-to-one aggregation",
"# for each time slice, as well as plot types that support extrapolation.",
"plot_types_requiring_aggregation",
"=",
"[",
"'line'",
",",
"'gaussian'",
",",
"'gaussian_filter'",
",",
"'poly'",
",",
"'cubic_spline'",
",",
"'fourier'",
"]",
"plot_types_handling_aggregation",
"=",
"[",
"'scatter'",
"]",
"+",
"plot_types_requiring_aggregation",
"plot_types_not_handling_aggregation",
"=",
"[",
"'box'",
"]",
"plot_types_curve_fit",
"=",
"[",
"'gaussian'",
",",
"'gaussian_filter'",
",",
"'poly'",
",",
"'cubic_spline'",
",",
"'fourier'",
"]",
"plot_types_supporting_extrapolation",
"=",
"[",
"'fourier'",
"]",
"all_plot_types",
"=",
"list",
"(",
"set",
"(",
"plot_types_requiring_aggregation",
"+",
"plot_types_handling_aggregation",
"+",
"plot_types_not_handling_aggregation",
"+",
"plot_types_curve_fit",
"+",
"plot_types_supporting_extrapolation",
")",
")",
"# Aggregation types that aggregate all values for a given time to one value.",
"many_to_one_agg_types",
"=",
"[",
"'min'",
",",
"'mean'",
",",
"'median'",
",",
"'max'",
"]",
"# Aggregation types that aggregate to many values or do not aggregate.",
"many_to_many_agg_types",
"=",
"[",
"'none'",
"]",
"all_agg_types",
"=",
"many_to_one_agg_types",
"+",
"many_to_many_agg_types",
"# Determine how the data was aggregated, if at all.",
"possible_time_agg_strs",
"=",
"[",
"'time'",
",",
"'week'",
",",
"'month'",
"]",
"time_agg_str",
"=",
"'time'",
"for",
"possible_time_agg_str",
"in",
"possible_time_agg_strs",
":",
"if",
"possible_time_agg_str",
"in",
"list",
"(",
"dataset",
".",
"coords",
")",
":",
"time_agg_str",
"=",
"possible_time_agg_str",
"break",
"# Make the data 2D - time and a stack of all other dimensions.",
"all_plotting_data_arrs",
"=",
"list",
"(",
"plot_descs",
".",
"keys",
"(",
")",
")",
"all_plotting_data",
"=",
"dataset",
"[",
"all_plotting_data_arrs",
"]",
"all_times",
"=",
"all_plotting_data",
"[",
"time_agg_str",
"]",
".",
"values",
"# Mask out times for which no data variable to plot has any non-NaN data.",
"nan_mask_data_vars",
"=",
"list",
"(",
"all_plotting_data",
"[",
"all_plotting_data_arrs",
"]",
".",
"notnull",
"(",
")",
".",
"data_vars",
".",
"values",
"(",
")",
")",
"for",
"i",
",",
"data_var",
"in",
"enumerate",
"(",
"nan_mask_data_vars",
")",
":",
"time_nan_mask",
"=",
"data_var",
"if",
"i",
"==",
"0",
"else",
"time_nan_mask",
"|",
"data_var",
"time_nan_mask",
"=",
"time_nan_mask",
".",
"any",
"(",
"[",
"x_coord",
",",
"y_coord",
"]",
")",
"times_not_all_nan",
"=",
"all_times",
"[",
"time_nan_mask",
".",
"values",
"]",
"non_nan_plotting_data",
"=",
"all_plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"times_not_all_nan",
"}",
"]",
"# Determine the number of extrapolation data points. #",
"extrap_day_range",
"=",
"0",
"n_extrap_pts",
"=",
"0",
"# For each data array to plot...",
"for",
"data_arr_name",
",",
"agg_dict",
"in",
"plot_descs",
".",
"items",
"(",
")",
":",
"# For each aggregation type (e.g. 'mean', 'median')...",
"for",
"agg_type",
",",
"plot_dicts",
"in",
"agg_dict",
".",
"items",
"(",
")",
":",
"# For each plot for this aggregation type...",
"for",
"i",
",",
"plot_dict",
"in",
"enumerate",
"(",
"plot_dicts",
")",
":",
"for",
"plot_type",
",",
"plot_kwargs",
"in",
"plot_dict",
".",
"items",
"(",
")",
":",
"# Only check the plot types supporting extrapolation.",
"if",
"plot_type",
"==",
"'fourier'",
":",
"curr_extrap_day_range",
"=",
"0",
"n_predict",
"=",
"0",
"# Default to no extrapolation.",
"# Addressing this way to modify `plot_descs`.",
"extrap_time",
"=",
"plot_kwargs",
".",
"get",
"(",
"'extrap_time'",
",",
"None",
")",
"if",
"extrap_time",
"is",
"not",
"None",
":",
"assert",
"time_agg_str",
"==",
"'time'",
",",
"\"Extrapolating for data with a time dimension other than 'time' - \"",
"\"such as 'month', or 'week' - is not supported. A time dimension of 'month' \"",
"\"or 'week' denotes data aggregated for each month or week across years, so \"",
"\"extrapolation is meaningless in that case. Support for a time dimension of 'year' \"",
"\"has not yet been added.\"",
"# Determine the number of points to extrapolate (in an approximate manner).",
"# First find the time range of the given data.",
"first_last_days",
"=",
"list",
"(",
"map",
"(",
"lambda",
"np_dt_64",
":",
"_n64_to_datetime",
"(",
"np_dt_64",
")",
",",
"non_nan_plotting_data",
".",
"time",
".",
"values",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
")",
")",
"year_range",
"=",
"first_last_days",
"[",
"1",
"]",
".",
"year",
"-",
"first_last_days",
"[",
"0",
"]",
".",
"year",
"month_range",
"=",
"first_last_days",
"[",
"1",
"]",
".",
"month",
"-",
"first_last_days",
"[",
"0",
"]",
".",
"month",
"day_range",
"=",
"first_last_days",
"[",
"1",
"]",
".",
"day",
"-",
"first_last_days",
"[",
"0",
"]",
".",
"day",
"day_range",
"=",
"year_range",
"*",
"365.25",
"+",
"month_range",
"*",
"30",
"+",
"day_range",
"# Then find the time range of the extrapolation string.",
"fields",
"=",
"re",
".",
"match",
"(",
"r\"(?P<num>[0-9]{0,5})(?P<unit>[YMD])\"",
",",
"extrap_time",
")",
"assert",
"fields",
"is",
"not",
"None",
",",
"r\"For the '{}' DataArray: When using 'fourier' as \"",
"\"the fit type, if the 'extrap_time' parameter is supplied, it must be \"",
"\"a string containing a positive integer followed by one of ['Y', 'M', or 'D'].\"",
".",
"format",
"(",
"data_arr_name",
")",
"num",
",",
"unit",
"=",
"int",
"(",
"fields",
"[",
"'num'",
"]",
")",
",",
"fields",
"[",
"'unit'",
"]",
"days_per_unit",
"=",
"dict",
"(",
"Y",
"=",
"365.25",
",",
"M",
"=",
"30",
",",
"D",
"=",
"1",
")",
"[",
"unit",
"]",
"curr_extrap_day_range",
"=",
"num",
"*",
"days_per_unit",
"n_predict",
"=",
"round",
"(",
"len",
"(",
"non_nan_plotting_data",
"[",
"time_agg_str",
"]",
")",
"*",
"(",
"curr_extrap_day_range",
"/",
"day_range",
")",
")",
"plot_descs",
"[",
"data_arr_name",
"]",
"[",
"agg_type",
"]",
"[",
"i",
"]",
"[",
"plot_type",
"]",
"[",
"'n_predict'",
"]",
"=",
"n_predict",
"# This parameter is used by get_curvefit() later.",
"extrap_day_range",
"=",
"max",
"(",
"extrap_day_range",
",",
"curr_extrap_day_range",
")",
"n_extrap_pts",
"=",
"max",
"(",
"n_extrap_pts",
",",
"n_predict",
")",
"# Collect (1) the times not containing only NaN values and (2) the extrapolation times.",
"if",
"time_agg_str",
"==",
"'time'",
"and",
"len",
"(",
"times_not_all_nan",
")",
">",
"0",
":",
"first_extrap_time",
"=",
"times_not_all_nan",
"[",
"-",
"1",
"]",
"+",
"np",
".",
"timedelta64",
"(",
"extrap_day_range",
",",
"'D'",
")",
"/",
"n_extrap_pts",
"last_extrap_time",
"=",
"times_not_all_nan",
"[",
"-",
"1",
"]",
"+",
"np",
".",
"timedelta64",
"(",
"extrap_day_range",
",",
"'D'",
")",
"extrap_times",
"=",
"np",
".",
"linspace",
"(",
"_n64_datetime_to_scalar",
"(",
"first_extrap_time",
")",
",",
"_n64_datetime_to_scalar",
"(",
"last_extrap_time",
")",
",",
"num",
"=",
"n_extrap_pts",
")",
"extrap_times",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"_scalar_to_n64_datetime",
",",
"extrap_times",
")",
")",
")",
"times_not_all_nan_and_extrap",
"=",
"np",
".",
"concatenate",
"(",
"(",
"times_not_all_nan",
",",
"extrap_times",
")",
")",
"if",
"len",
"(",
"extrap_times",
")",
">",
"0",
"else",
"times_not_all_nan",
"else",
":",
"times_not_all_nan_and_extrap",
"=",
"times_not_all_nan",
"# Compute all of the plotting data - handling aggregations and extrapolations.",
"plotting_data_not_nan_and_extrap",
"=",
"{",
"}",
"# Maps data arary names to plotting data (NumPy arrays).",
"# Get the x locations of data points not filled with NaNs and the x locations of extrapolation points.",
"epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"times_not_all_nan_and_extrap",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"times_not_all_nan_and_extrap",
"epochs_not_extrap",
"=",
"epochs",
"[",
":",
"len",
"(",
"times_not_all_nan",
")",
"]",
"# Handle aggregations and curve fits. #",
"# For each data array to plot...",
"for",
"data_arr_name",
",",
"agg_dict",
"in",
"plot_descs",
".",
"items",
"(",
")",
":",
"data_arr_plotting_data",
"=",
"non_nan_plotting_data",
"[",
"data_arr_name",
"]",
"# For each aggregation type (e.g. 'mean', 'median')...",
"for",
"agg_type",
",",
"plot_dicts",
"in",
"agg_dict",
".",
"items",
"(",
")",
":",
"# For each plot for this aggregation type...",
"for",
"i",
",",
"plot_dict",
"in",
"enumerate",
"(",
"plot_dicts",
")",
":",
"for",
"plot_type",
",",
"plot_kwargs",
"in",
"plot_dict",
".",
"items",
"(",
")",
":",
"assert",
"plot_type",
"in",
"all_plot_types",
",",
"r\"For the '{}' DataArray: plot_type '{}' not recognized\"",
".",
"format",
"(",
"data_arr_name",
",",
"plot_type",
")",
"# Ensure aggregation types are legal for this data.",
"# Some plot types require aggregation.",
"if",
"plot_type",
"in",
"plot_types_requiring_aggregation",
":",
"if",
"agg_type",
"not",
"in",
"many_to_one_agg_types",
":",
"raise",
"ValueError",
"(",
"\"For the '{}' DataArray: the plot type \"",
"\"'{}' only accepts many-to-one aggregation (currently using '{}'). \"",
"\"Please pass any of {} as the aggregation type \"",
"\"or change the plot type.\"",
".",
"format",
"(",
"data_arr_name",
",",
"plot_type",
",",
"agg_type",
",",
"many_to_one_agg_types",
")",
")",
"# Some plot types cannot accept many-to-one aggregation.",
"if",
"plot_type",
"not",
"in",
"plot_types_handling_aggregation",
":",
"if",
"agg_type",
"not",
"in",
"many_to_many_agg_types",
":",
"raise",
"ValueError",
"(",
"\"For the '{}' DataArray: \"",
"\"the plot type '{}' only accepts many-to-many aggregation \"",
"\"(currently using '{}'). Please pass any of {} as \"",
"\"the aggregation type or change the plot type.\"",
".",
"format",
"(",
"data_arr_name",
",",
"plot_type",
",",
"agg_type",
",",
"many_to_many_agg_types",
")",
")",
"# Aggregate if necessary.",
"y",
"=",
"data_arr_plotting_data",
"if",
"agg_type",
"==",
"'min'",
":",
"y",
"=",
"y",
".",
"min",
"(",
"[",
"x_coord",
",",
"y_coord",
"]",
")",
"if",
"agg_type",
"==",
"'mean'",
":",
"y",
"=",
"y",
".",
"mean",
"(",
"[",
"x_coord",
",",
"y_coord",
"]",
")",
"if",
"agg_type",
"==",
"'median'",
":",
"y",
"=",
"y",
".",
"median",
"(",
"[",
"x_coord",
",",
"y_coord",
"]",
")",
"if",
"agg_type",
"==",
"'max'",
":",
"y",
"=",
"y",
".",
"max",
"(",
"[",
"x_coord",
",",
"y_coord",
"]",
")",
"# Handle curve fits.",
"if",
"plot_type",
"in",
"plot_types_curve_fit",
":",
"smooth",
"=",
"plot_kwargs",
".",
"get",
"(",
"'smooth'",
",",
"True",
")",
"# Create the curve fit.",
"x_smooth",
"=",
"None",
"if",
"smooth",
"else",
"epochs_not_extrap",
"data_arr_epochs",
",",
"y",
"=",
"get_curvefit",
"(",
"epochs_not_extrap",
",",
"y",
".",
"values",
",",
"fit_type",
"=",
"plot_type",
",",
"x_smooth",
"=",
"x_smooth",
",",
"fit_kwargs",
"=",
"plot_kwargs",
")",
"# Convert time stamps to NumPy datetime objects.",
"data_arr_times",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"_scalar_to_n64_datetime",
",",
"data_arr_epochs",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"data_arr_epochs",
"# Convert the NumPy array into an xarray DataArray.",
"coords",
"=",
"{",
"time_agg_str",
":",
"data_arr_times",
"}",
"dims",
"=",
"list",
"(",
"coords",
".",
"keys",
"(",
")",
")",
"y",
"=",
"xr",
".",
"DataArray",
"(",
"y",
",",
"coords",
"=",
"coords",
",",
"dims",
"=",
"dims",
")",
"plotting_data_not_nan_and_extrap",
"[",
"(",
"data_arr_name",
",",
"agg_type",
",",
"plot_type",
")",
"]",
"=",
"y",
"# Handle the potential for multiple plots.",
"max_times_per_plot",
"=",
"len",
"(",
"times_not_all_nan_and_extrap",
")",
"if",
"max_times_per_plot",
"is",
"None",
"else",
"max_times_per_plot",
"num_times",
"=",
"len",
"(",
"times_not_all_nan_and_extrap",
")",
"num_plots",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"num_times",
"/",
"max_times_per_plot",
")",
")",
"num_times_per_plot",
"=",
"round",
"(",
"num_times",
"/",
"num_plots",
")",
"if",
"num_plots",
"!=",
"0",
"else",
"0",
"num_cols",
"=",
"min",
"(",
"num_plots",
",",
"max_cols",
")",
"num_rows",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"num_plots",
"/",
"num_cols",
")",
")",
"if",
"num_cols",
"!=",
"0",
"else",
"0",
"# Set a reasonable figsize if one is not set in `fig_params`.",
"fig_params",
".",
"setdefault",
"(",
"'figsize'",
",",
"(",
"12",
"*",
"num_cols",
",",
"6",
"*",
"num_rows",
")",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"*",
"*",
"fig_params",
")",
"if",
"fig",
"is",
"None",
"else",
"fig",
"# Check if there are no plots to make.",
"if",
"num_plots",
"==",
"0",
":",
"return",
"fig",
",",
"plotting_data_not_nan_and_extrap",
"# Create each plot. #",
"for",
"time_ind",
",",
"ax_ind",
"in",
"zip",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"times_not_all_nan_and_extrap",
")",
",",
"num_times_per_plot",
")",
",",
"range",
"(",
"num_plots",
")",
")",
":",
"# The time bounds of this canvas (or \"Axes object\" or \"plot grid cell\").",
"ax_lower_time_bound_ind",
",",
"ax_upper_time_bound_ind",
"=",
"time_ind",
",",
"min",
"(",
"time_ind",
"+",
"num_times_per_plot",
",",
"len",
"(",
"times_not_all_nan_and_extrap",
")",
")",
"# Retrieve or create the axes if necessary.",
"if",
"len",
"(",
"times_not_all_nan_and_extrap",
")",
"<=",
"num_times_per_plot",
":",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_params",
")",
"else",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"num_rows",
",",
"num_cols",
",",
"ax_ind",
"+",
"1",
")",
"ax_times_not_all_nan_and_extrap",
"=",
"times_not_all_nan_and_extrap",
"[",
"ax_lower_time_bound_ind",
":",
"ax_upper_time_bound_ind",
"]",
"ax_time_bounds",
"=",
"ax_times_not_all_nan_and_extrap",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"ax_epochs",
"=",
"epochs",
"[",
"ax_lower_time_bound_ind",
":",
"ax_upper_time_bound_ind",
"]",
"ax_x_locs",
"=",
"np_scale",
"(",
"ax_epochs",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"ax_times_not_all_nan_and_extrap",
")",
"# Data variable plots within each plot.",
"data_arr_plots",
"=",
"[",
"]",
"legend_labels",
"=",
"[",
"]",
"# For each data array to plot...",
"for",
"data_arr_name",
",",
"agg_dict",
"in",
"plot_descs",
".",
"items",
"(",
")",
":",
"# For each aggregation type (e.g. 'mean', 'median')...",
"for",
"agg_type",
",",
"plot_dicts",
"in",
"agg_dict",
".",
"items",
"(",
")",
":",
"# For each plot for this aggregation type...",
"for",
"plot_dict",
"in",
"plot_dicts",
":",
"for",
"plot_type",
",",
"plot_kwargs",
"in",
"plot_dict",
".",
"items",
"(",
")",
":",
"# Determine the legend label for this plot.",
"plot_type_str",
"=",
"{",
"'scatter'",
":",
"'scatterplot'",
",",
"'line'",
":",
"'lineplot'",
",",
"'box'",
":",
"'boxplot'",
",",
"'gaussian'",
":",
"'gaussian fit'",
",",
"'gaussian_filter'",
":",
"'gaussian filter fit'",
",",
"'poly'",
":",
"'degree {} polynomial fit'",
",",
"'cubic_spline'",
":",
"'cubic spline fit'",
",",
"'fourier'",
":",
"'Fourier fit ({} harmonics)'",
"}",
"[",
"plot_type",
"]",
"if",
"plot_type",
"==",
"'poly'",
":",
"assert",
"'degree'",
"in",
"plot_kwargs",
",",
"r\"For the '{}' DataArray: When using 'poly' as \"",
"\"the fit type, the fit kwargs must have 'degree' \"",
"\"specified.\"",
".",
"format",
"(",
"data_arr_name",
")",
"plot_type_str",
"=",
"plot_type_str",
".",
"format",
"(",
"plot_kwargs",
".",
"get",
"(",
"'degree'",
")",
")",
"if",
"plot_type",
"==",
"'fourier'",
":",
"plot_type_str",
"=",
"plot_type_str",
".",
"format",
"(",
"plot_kwargs",
".",
"get",
"(",
"'n_harm'",
",",
"default_fourier_n_harm",
")",
")",
"# Legend labels for the non-extrapolation",
"# and extrapolation segments",
"plot_type_strs",
"=",
"[",
"]",
"# Remove plot kwargs that are not recognized",
"# by plotting methods (cause errors).",
"plot_kwargs",
"=",
"plot_kwargs",
".",
"copy",
"(",
")",
"plot_kwargs",
".",
"pop",
"(",
"'extrap_time'",
",",
"None",
")",
"plot_kwargs",
".",
"pop",
"(",
"'n_predict'",
",",
"None",
")",
"plot_kwargs",
".",
"pop",
"(",
"'smooth'",
",",
"None",
")",
"plot_kwargs",
".",
"pop",
"(",
"'degree'",
",",
"None",
")",
"# 'degree'",
"plot_kwargs",
".",
"pop",
"(",
"'n_harm'",
",",
"None",
")",
"# 'fourier'",
"# Handle default plot kwargs.",
"if",
"plot_type",
"==",
"'box'",
":",
"plot_kwargs",
".",
"setdefault",
"(",
"'boxprops'",
",",
"dict",
"(",
"facecolor",
"=",
"'orange'",
")",
")",
"plot_kwargs",
".",
"setdefault",
"(",
"'flierprops'",
",",
"dict",
"(",
"marker",
"=",
"'o'",
",",
"markersize",
"=",
"0.5",
")",
")",
"plot_kwargs",
".",
"setdefault",
"(",
"'showfliers'",
",",
"False",
")",
"# Retrieve the plotting data.",
"y",
"=",
"plotting_data_not_nan_and_extrap",
"[",
"(",
"data_arr_name",
",",
"agg_type",
",",
"plot_type",
")",
"]",
"y",
"=",
"y",
".",
"sel",
"(",
"{",
"time_agg_str",
":",
"slice",
"(",
"ax_time_bounds",
"[",
"0",
"]",
",",
"ax_time_bounds",
"[",
"1",
"]",
")",
"}",
")",
"# Handle cases of insufficient data for this section of the plot.",
"not_nat_times",
"=",
"None",
"if",
"time_agg_str",
"==",
"'time'",
":",
"not_nat_times",
"=",
"~",
"np",
".",
"isnat",
"(",
"y",
"[",
"time_agg_str",
"]",
")",
"else",
":",
"not_nat_times",
"=",
"~",
"np",
".",
"isnan",
"(",
"y",
"[",
"time_agg_str",
"]",
")",
"num_unique_times_y",
"=",
"len",
"(",
"np",
".",
"unique",
"(",
"y",
"[",
"time_agg_str",
"]",
".",
"values",
"[",
"not_nat_times",
"]",
")",
")",
"if",
"num_unique_times_y",
"==",
"0",
":",
"# There is no data.",
"continue",
"if",
"num_unique_times_y",
"==",
"1",
":",
"# There is 1 data point.",
"plot_type",
"=",
"'scatter'",
"plot_kwargs",
"=",
"{",
"}",
"data_arr_epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"y",
"[",
"time_agg_str",
"]",
".",
"values",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"ax_times_not_all_nan_and_extrap",
"data_arr_x_locs",
"=",
"np",
".",
"interp",
"(",
"data_arr_epochs",
",",
"ax_epochs",
",",
"ax_x_locs",
")",
"data_arr_time_bounds",
"=",
"y",
"[",
"time_agg_str",
"]",
".",
"values",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"# Determine if this plotting data includes extrapolated values.",
"data_arr_non_extrap_time_bounds",
"=",
"None",
"data_arr_has_non_extrap",
"=",
"data_arr_time_bounds",
"[",
"0",
"]",
"<",
"times_not_all_nan",
"[",
"-",
"1",
"]",
"if",
"data_arr_has_non_extrap",
":",
"data_arr_non_extrap_time_bounds",
"=",
"[",
"data_arr_time_bounds",
"[",
"0",
"]",
",",
"min",
"(",
"data_arr_time_bounds",
"[",
"1",
"]",
",",
"times_not_all_nan",
"[",
"-",
"1",
"]",
")",
"]",
"# Because the data could be smoothed, the last",
"# non-extrapolation time is the last time before",
"# or at the last non-extrapolation time",
"# for the original data.",
"non_extrap_plot_last_time",
"=",
"data_arr_non_extrap_time_bounds",
"[",
"1",
"]",
"if",
"num_unique_times_y",
">",
"1",
":",
"non_extrap_plot_last_time",
"=",
"y",
".",
"sel",
"(",
"{",
"time_agg_str",
":",
"data_arr_non_extrap_time_bounds",
"[",
"1",
"]",
"}",
",",
"method",
"=",
"'ffill'",
")",
"[",
"time_agg_str",
"]",
".",
"values",
"data_arr_non_extrap_plotting_time_bounds",
"=",
"[",
"data_arr_non_extrap_time_bounds",
"[",
"0",
"]",
",",
"non_extrap_plot_last_time",
"]",
"data_arr_extrap_time_bounds",
"=",
"None",
"data_arr_has_extrap",
"=",
"times_not_all_nan",
"[",
"-",
"1",
"]",
"<",
"data_arr_time_bounds",
"[",
"1",
"]",
"if",
"data_arr_has_extrap",
":",
"data_arr_extrap_time_bounds",
"=",
"[",
"max",
"(",
"data_arr_time_bounds",
"[",
"0",
"]",
",",
"extrap_times",
"[",
"0",
"]",
")",
",",
"data_arr_time_bounds",
"[",
"1",
"]",
"]",
"# Because the data could be smoothed, the first extrapolation time",
"# is the first time after the last non-extrapolation time for the original data.",
"extrap_plot_first_time",
"=",
"y",
".",
"sel",
"(",
"{",
"time_agg_str",
":",
"data_arr_non_extrap_time_bounds",
"[",
"1",
"]",
"}",
",",
"method",
"=",
"'ffill'",
")",
"[",
"time_agg_str",
"]",
".",
"values",
"if",
"data_arr_has_non_extrap",
"else",
"data_arr_time_bounds",
"[",
"0",
"]",
"data_arr_extrap_plotting_time_bounds",
"=",
"[",
"extrap_plot_first_time",
",",
"data_arr_extrap_time_bounds",
"[",
"1",
"]",
"]",
"# Separate non-extrapolation and extrapolation data.",
"if",
"data_arr_has_non_extrap",
":",
"data_arr_non_extrap",
"=",
"y",
".",
"sel",
"(",
"{",
"time_agg_str",
":",
"slice",
"(",
"*",
"data_arr_non_extrap_plotting_time_bounds",
")",
"}",
")",
"data_arr_non_extrap_epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"data_arr_non_extrap",
"[",
"time_agg_str",
"]",
".",
"values",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"data_arr_non_extrap",
"[",
"time_agg_str",
"]",
".",
"values",
"data_arr_non_extrap_x_locs",
"=",
"np",
".",
"interp",
"(",
"data_arr_non_extrap_epochs",
",",
"ax_epochs",
",",
"ax_x_locs",
")",
"# Format plotting kwargs for the non-extrapolation data.",
"plot_kwargs_non_extrap",
"=",
"plot_kwargs",
".",
"copy",
"(",
")",
"plot_kwargs_non_extrap",
".",
"pop",
"(",
"'extrap_color'",
",",
"None",
")",
"if",
"data_arr_has_extrap",
":",
"# Include the last non-extrapolation point so the",
"# non-extrapolation and extrapolation lines connect.",
"data_arr_extrap",
"=",
"y",
".",
"sel",
"(",
"{",
"time_agg_str",
":",
"slice",
"(",
"*",
"data_arr_extrap_plotting_time_bounds",
")",
"}",
")",
"data_arr_extrap_epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"data_arr_extrap",
"[",
"time_agg_str",
"]",
".",
"values",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"data_arr_extrap",
"[",
"time_agg_str",
"]",
".",
"values",
"data_arr_extrap_x_locs",
"=",
"np",
".",
"interp",
"(",
"data_arr_extrap_epochs",
",",
"ax_epochs",
",",
"ax_x_locs",
")",
"# Format plotting kwargs for the extrapolation data.",
"plot_kwargs_extrap",
"=",
"plot_kwargs",
".",
"copy",
"(",
")",
"extrap_color",
"=",
"plot_kwargs_extrap",
".",
"pop",
"(",
"'extrap_color'",
",",
"None",
")",
"if",
"extrap_color",
"is",
"not",
"None",
":",
"plot_kwargs_extrap",
"[",
"'color'",
"]",
"=",
"extrap_color",
"# Specify non-extrap and extrap plotting args.",
"if",
"data_arr_has_non_extrap",
":",
"plot_args_non_extrap",
"=",
"[",
"data_arr_non_extrap_x_locs",
",",
"data_arr_non_extrap",
"]",
"if",
"data_arr_has_extrap",
":",
"plot_args_extrap",
"=",
"[",
"data_arr_extrap_x_locs",
",",
"data_arr_extrap",
"]",
"# Actually create the plot.",
"def",
"create_plot",
"(",
"x_locs",
",",
"data_arr",
",",
"*",
"*",
"plot_kwargs",
")",
":",
"\"\"\"\n Creates a plot\n\n Parameters\n ----------\n x_locs: xarray.DataArray\n A 1D `xarray.DataArray` containing ascending values\n in range [0,1], denoting the x locations on the current\n canvas at which to plot data with corresponding time\n indicies in `data_arr`.\n data_arr: xarray.DataArray\n An `xarray.DataArray` containing a dimension named\n `time_agg_str` (the value of that variable in this context).\n\n Returns\n -------\n plot_obj: matplotlib.artist.Artist\n The plot.\n \"\"\"",
"plot_obj",
"=",
"None",
"if",
"plot_type",
"==",
"'scatter'",
":",
"data_arr_dims",
"=",
"list",
"(",
"data_arr",
".",
"dims",
")",
"data_arr_flat",
"=",
"data_arr",
".",
"stack",
"(",
"flat",
"=",
"data_arr_dims",
")",
"plot_obj",
"=",
"ax",
".",
"scatter",
"(",
"x_locs",
",",
"data_arr_flat",
")",
"elif",
"plot_type",
"in",
"[",
"'line'",
",",
"'gaussian'",
",",
"'gaussian_filter'",
",",
"'poly'",
",",
"'cubic_spline'",
",",
"'fourier'",
"]",
":",
"plot_obj",
"=",
"ax",
".",
"plot",
"(",
"x_locs",
",",
"data_arr",
")",
"[",
"0",
"]",
"elif",
"plot_type",
"==",
"'box'",
":",
"boxplot_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"data_arr",
")",
"# Data formatted for matplotlib.pyplot.boxplot().",
"filtered_formatted_data",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"d",
",",
"m",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"data_arr",
".",
"values",
",",
"boxplot_nan_mask",
".",
"values",
")",
")",
":",
"if",
"len",
"(",
"d",
"[",
"m",
"]",
"!=",
"0",
")",
":",
"filtered_formatted_data",
".",
"append",
"(",
"d",
"[",
"m",
"]",
")",
"box_width",
"=",
"0.5",
"*",
"np",
".",
"min",
"(",
"np",
".",
"diff",
"(",
"x_locs",
")",
")",
"if",
"len",
"(",
"x_locs",
")",
">",
"1",
"else",
"0.5",
"# `manage_xticks=False` to avoid excessive padding on x-axis.",
"bp",
"=",
"ax",
".",
"boxplot",
"(",
"filtered_formatted_data",
",",
"widths",
"=",
"[",
"box_width",
"]",
"*",
"len",
"(",
"filtered_formatted_data",
")",
",",
"positions",
"=",
"x_locs",
",",
"patch_artist",
"=",
"True",
",",
"manage_xticks",
"=",
"False",
",",
"*",
"*",
"plot_kwargs",
")",
"plot_obj",
"=",
"bp",
"[",
"'boxes'",
"]",
"[",
"0",
"]",
"return",
"plot_obj",
"if",
"data_arr_has_non_extrap",
":",
"plot_obj",
"=",
"create_plot",
"(",
"*",
"plot_args_non_extrap",
",",
"*",
"*",
"plot_kwargs_non_extrap",
")",
"data_arr_plots",
".",
"append",
"(",
"plot_obj",
")",
"plot_type_strs",
".",
"append",
"(",
"plot_type_str",
")",
"if",
"data_arr_has_extrap",
"and",
"plot_type",
"in",
"plot_types_supporting_extrapolation",
":",
"plot_obj",
"=",
"create_plot",
"(",
"*",
"plot_args_extrap",
",",
"*",
"*",
"plot_kwargs_extrap",
")",
"data_arr_plots",
".",
"append",
"(",
"plot_obj",
")",
"plot_type_strs",
".",
"append",
"(",
"'extrapolation of '",
"+",
"plot_type_str",
")",
"plot_type_str_suffix",
"=",
"' of {}'",
".",
"format",
"(",
"agg_type",
")",
"if",
"agg_type",
"!=",
"'none'",
"else",
"''",
"plot_type_strs",
"=",
"[",
"plot_type_str",
"+",
"plot_type_str_suffix",
"for",
"plot_type_str",
"in",
"plot_type_strs",
"]",
"[",
"legend_labels",
".",
"append",
"(",
"'{} of {}'",
".",
"format",
"(",
"plot_type_str",
",",
"data_arr_name",
")",
")",
"for",
"plot_type_str",
"in",
"plot_type_strs",
"]",
"# Label the axes and create the legend.",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"ax_times_not_all_nan_and_extrap",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"naive_months_ticks_by_week",
"(",
"ax_times_not_all_nan_and_extrap",
")",
"if",
"time_agg_str",
"in",
"[",
"'week'",
",",
"'weekofyear'",
"]",
"else",
"month_ints_to_month_names",
"(",
"ax_times_not_all_nan_and_extrap",
")",
"plt",
".",
"xticks",
"(",
"ax_x_locs",
",",
"date_strs",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"if",
"show_legend",
":",
"ax",
".",
"legend",
"(",
"handles",
"=",
"data_arr_plots",
",",
"labels",
"=",
"legend_labels",
",",
"loc",
"=",
"'best'",
")",
"title_postpend",
"=",
"\" ({} to {})\"",
".",
"format",
"(",
"date_strs",
"[",
"0",
"]",
",",
"date_strs",
"[",
"-",
"1",
"]",
")",
"title_prepend",
"=",
"\"Figure {}\"",
".",
"format",
"(",
"ax_ind",
")",
"if",
"title",
"is",
"None",
"else",
"title",
"ax",
".",
"set_title",
"(",
"title_prepend",
"+",
"title_postpend",
")",
"return",
"fig",
",",
"plotting_data_not_nan_and_extrap"
] | [
258,
0
] | [
777,
48
] | python | en | ['en', 'error', 'th'] | False |
get_curvefit | (x, y, fit_type, x_smooth=None, n_pts=n_pts_smooth, fit_kwargs=None) |
Gets a curve fit given x values, y values, a type of curve, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier'].
The option 'gaussian' creates a Gaussian fit.
The option 'gaussian_filter' creates a Gaussian filter fit.
The option 'poly' creates a polynomial fit.
The option 'cubic_spline' creates a cubic spline fit.
The option 'fourier' creates a Fourier curve fit.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fit_kwargs: dict
Keyword arguments for the selected fit type.
In the case of `fit_type == 'poly'`, this must contain a 'degree' entry (an int),
which is the degree of the polynomial to fit.
In the case of `fit_type == 'gaussian_filter'`, this may contain a 'sigma' entry,
which is the standard deviation of the Gaussian kernel.
A larger value yields a smoother but less close-fitting curve.
In the case of `fit_type == 'fourier'`, this may contain 'n_predict' or 'n_harm' entries.
The 'n_predict' entry is the number of points to extrapolate.
The points will be spaced evenly by the mean spacing of values in `x`.
The 'n_harm' entry is the number of harmonics to use.
A higher value yields a closer fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
If there are no non-NaN values in `y`, these will be filled with `n_pts` NaNs.
If there is only 1 non-NaN value in `y`, these will be filled with
their corresponding values (y or x value) for that point to a length of `n_pts`.
:Authors:
John Rattz ([email protected])
|
Gets a curve fit given x values, y values, a type of curve, and parameters for that curve. | def get_curvefit(x, y, fit_type, x_smooth=None, n_pts=n_pts_smooth, fit_kwargs=None):
"""
Gets a curve fit given x values, y values, a type of curve, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier'].
The option 'gaussian' creates a Gaussian fit.
The option 'gaussian_filter' creates a Gaussian filter fit.
The option 'poly' creates a polynomial fit.
The option 'cubic_spline' creates a cubic spline fit.
The option 'fourier' creates a Fourier curve fit.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fit_kwargs: dict
Keyword arguments for the selected fit type.
In the case of `fit_type == 'poly'`, this must contain a 'degree' entry (an int),
which is the degree of the polynomial to fit.
In the case of `fit_type == 'gaussian_filter'`, this may contain a 'sigma' entry,
which is the standard deviation of the Gaussian kernel.
A larger value yields a smoother but less close-fitting curve.
In the case of `fit_type == 'fourier'`, this may contain 'n_predict' or 'n_harm' entries.
The 'n_predict' entry is the number of points to extrapolate.
The points will be spaced evenly by the mean spacing of values in `x`.
The 'n_harm' entry is the number of harmonics to use.
A higher value yields a closer fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
If there are no non-NaN values in `y`, these will be filled with `n_pts` NaNs.
If there is only 1 non-NaN value in `y`, these will be filled with
their corresponding values (y or x value) for that point to a length of `n_pts`.
:Authors:
John Rattz ([email protected])
"""
interpolation_curve_fits = ['gaussian', 'gaussian_filter',
'poly', 'cubic_spline']
extrapolation_curve_filts = ['fourier']
# Handle NaNs (omit them).
not_nan_mask = ~np.isnan(y)
x = x[not_nan_mask]; y = y[not_nan_mask]
# Handle the cases of there being too few points to curve fit.
if len(y) == 0:
x_smooth = np.repeat(np.nan, n_pts)
y_smooth = np.repeat(np.nan, n_pts)
return x_smooth, y_smooth
if len(y) == 1:
x_smooth = np.repeat(x[0], n_pts)
y_smooth = np.repeat(y[0], n_pts)
return x_smooth, y_smooth
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x) - 1, n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
opt_params = {}
if fit_type == 'gaussian':
x_smooth, y_smooth = gaussian_fit(x, y, x_smooth)
elif fit_type == 'gaussian_filter':
if 'sigma' in fit_kwargs:
opt_params.update(dict(sigma=fit_kwargs.get('sigma')))
x_smooth, y_smooth = gaussian_filter_fit(x, y, x_smooth,
**opt_params)
elif fit_type == 'poly':
assert 'degree' in fit_kwargs.keys(), \
"When plotting a polynomal fit, there must be" \
"a 'degree' entry in the plot_kwargs parameter."
degree = fit_kwargs.get('degree')
x_smooth, y_smooth = poly_fit(x, y, degree, x_smooth)
elif fit_type == 'cubic_spline':
cs = CubicSpline(x, y)
y_smooth = cs(x_smooth)
if fit_type in extrapolation_curve_filts:
n_predict = fit_kwargs.get('n_predict', 0)
if fit_type == 'fourier':
if 'n_harm' in fit_kwargs:
opt_params.update(dict(n_harm=fit_kwargs.get('n_harm')))
x_smooth, y_smooth = \
fourier_fit(x, y, n_predict, x_smooth,
**opt_params)
return x_smooth, y_smooth | [
"def",
"get_curvefit",
"(",
"x",
",",
"y",
",",
"fit_type",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"n_pts_smooth",
",",
"fit_kwargs",
"=",
"None",
")",
":",
"interpolation_curve_fits",
"=",
"[",
"'gaussian'",
",",
"'gaussian_filter'",
",",
"'poly'",
",",
"'cubic_spline'",
"]",
"extrapolation_curve_filts",
"=",
"[",
"'fourier'",
"]",
"# Handle NaNs (omit them).",
"not_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"y",
")",
"x",
"=",
"x",
"[",
"not_nan_mask",
"]",
"y",
"=",
"y",
"[",
"not_nan_mask",
"]",
"# Handle the cases of there being too few points to curve fit.",
"if",
"len",
"(",
"y",
")",
"==",
"0",
":",
"x_smooth",
"=",
"np",
".",
"repeat",
"(",
"np",
".",
"nan",
",",
"n_pts",
")",
"y_smooth",
"=",
"np",
".",
"repeat",
"(",
"np",
".",
"nan",
",",
"n_pts",
")",
"return",
"x_smooth",
",",
"y_smooth",
"if",
"len",
"(",
"y",
")",
"==",
"1",
":",
"x_smooth",
"=",
"np",
".",
"repeat",
"(",
"x",
"[",
"0",
"]",
",",
"n_pts",
")",
"y_smooth",
"=",
"np",
".",
"repeat",
"(",
"y",
"[",
"0",
"]",
",",
"n_pts",
")",
"return",
"x_smooth",
",",
"y_smooth",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth_inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"x",
")",
"-",
"1",
",",
"n_pts",
")",
"x_smooth",
"=",
"np",
".",
"interp",
"(",
"x_smooth_inds",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
",",
"x",
")",
"opt_params",
"=",
"{",
"}",
"if",
"fit_type",
"==",
"'gaussian'",
":",
"x_smooth",
",",
"y_smooth",
"=",
"gaussian_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
")",
"elif",
"fit_type",
"==",
"'gaussian_filter'",
":",
"if",
"'sigma'",
"in",
"fit_kwargs",
":",
"opt_params",
".",
"update",
"(",
"dict",
"(",
"sigma",
"=",
"fit_kwargs",
".",
"get",
"(",
"'sigma'",
")",
")",
")",
"x_smooth",
",",
"y_smooth",
"=",
"gaussian_filter_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
",",
"*",
"*",
"opt_params",
")",
"elif",
"fit_type",
"==",
"'poly'",
":",
"assert",
"'degree'",
"in",
"fit_kwargs",
".",
"keys",
"(",
")",
",",
"\"When plotting a polynomal fit, there must be\"",
"\"a 'degree' entry in the plot_kwargs parameter.\"",
"degree",
"=",
"fit_kwargs",
".",
"get",
"(",
"'degree'",
")",
"x_smooth",
",",
"y_smooth",
"=",
"poly_fit",
"(",
"x",
",",
"y",
",",
"degree",
",",
"x_smooth",
")",
"elif",
"fit_type",
"==",
"'cubic_spline'",
":",
"cs",
"=",
"CubicSpline",
"(",
"x",
",",
"y",
")",
"y_smooth",
"=",
"cs",
"(",
"x_smooth",
")",
"if",
"fit_type",
"in",
"extrapolation_curve_filts",
":",
"n_predict",
"=",
"fit_kwargs",
".",
"get",
"(",
"'n_predict'",
",",
"0",
")",
"if",
"fit_type",
"==",
"'fourier'",
":",
"if",
"'n_harm'",
"in",
"fit_kwargs",
":",
"opt_params",
".",
"update",
"(",
"dict",
"(",
"n_harm",
"=",
"fit_kwargs",
".",
"get",
"(",
"'n_harm'",
")",
")",
")",
"x_smooth",
",",
"y_smooth",
"=",
"fourier_fit",
"(",
"x",
",",
"y",
",",
"n_predict",
",",
"x_smooth",
",",
"*",
"*",
"opt_params",
")",
"return",
"x_smooth",
",",
"y_smooth"
] | [
782,
0
] | [
874,
29
] | python | en | ['en', 'error', 'th'] | False |
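A minimal usage sketch for `get_curvefit`, assuming it is importable from this module along with its fit helpers (`poly_fit`, etc.); the sample data below is hypothetical.

import numpy as np

# Hypothetical noisy samples to smooth.
x = np.linspace(0, 10, 20)
y = np.sin(x) + np.random.normal(scale=0.1, size=x.shape)

# 'poly' requires a 'degree' entry in fit_kwargs; 50 evenly spaced
# points spanning the range of x are interpolated for.
x_smooth, y_smooth = get_curvefit(x, y, fit_type='poly',
                                  n_pts=50, fit_kwargs={'degree': 3})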
plot_curvefit | (x, y, fit_type, x_smooth=None, n_pts=n_pts_smooth, fig_params={}, plot_kwargs={}, fig=None, ax=None) |
**This function is DEPRECATED.**
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['gaussian', 'gaussian_filter', 'poly', 'cubic_spline'].
The option 'gaussian' plots a Gaussian fit.
The option 'gaussian_filter' plots a Gaussian filter fit.
The option 'poly' plots a polynomial fit.
The option 'cubic_spline' plots a cubic spline fit.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}).
Used to create a Figure ``if fig is None and ax is None``.
plot_kwargs: dict
The kwargs for the call to ``matplotlib.axes.Axes.plot()``.
fig: matplotlib.figure.Figure
The figure to use for the plot. The figure must have at least one Axes object.
You can use the code ``fig,ax = plt.subplots()`` to create a figure with an associated Axes object.
The code ``fig = plt.figure()`` will not provide the Axes object.
The Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
Returns
-------
lines: matplotlib.lines.Line2D
Can be used as a handle for a matplotlib legend (i.e. plt.legend(handles=...)) among other things.
:Authors:
John Rattz ([email protected])
|
**This function is DEPRECATED.**
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve. | def plot_curvefit(x, y, fit_type, x_smooth=None, n_pts=n_pts_smooth, fig_params={}, plot_kwargs={}, fig=None, ax=None):
"""
**This function is DEPRECATED.**
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['gaussian', 'gaussian_filter', 'poly', 'cubic_spline'].
The option 'gaussian' plots a Gaussian fit.
The option 'gaussian_filter' plots a Gaussian filter fit.
The option 'poly' plots a polynomial fit.
The option 'cubic_spline' plots a cubic spline fit.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}).
Used to create a Figure ``if fig is None and ax is None``.
plot_kwargs: dict
The kwargs for the call to ``matplotlib.axes.Axes.plot()``.
fig: matplotlib.figure.Figure
The figure to use for the plot. The figure must have at least one Axes object.
You can use the code ``fig,ax = plt.subplots()`` to create a figure with an associated Axes object.
The code ``fig = plt.figure()`` will not provide the Axes object.
The Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
Returns
-------
lines: matplotlib.lines.Line2D
Can be used as a handle for a matplotlib legend (i.e. plt.legend(handles=...)) among other things.
:Authors:
John Rattz ([email protected])
"""
# Avoid modifying the original arguments.
fig_params, plot_kwargs = fig_params.copy(), plot_kwargs.copy()
fig_params.setdefault('figsize', (12, 6))
plot_kwargs.setdefault('linestyle', '-')
# Retrieve or create the axes if necessary.
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_params)
if x_smooth is None:
x_smooth = np.linspace(x.min(), x.max(), n_pts)
if fit_type == 'gaussian':
y_smooth = gaussian_fit(x, y, x_smooth)
elif fit_type == 'gaussian_filter':
sigma = plot_kwargs.pop('sigma', None)
y_smooth = gaussian_filter_fit(x, y, x_smooth, sigma=sigma)
elif fit_type == 'poly':
assert 'degree' in plot_kwargs.keys(), "When plotting a polynomial fit, there must be " \
"a 'degree' entry in the plot_kwargs parameter."
degree = plot_kwargs.pop('degree')
y_smooth = poly_fit(x, y, degree, x_smooth)
elif fit_type == 'cubic_spline':
cs = CubicSpline(x, y)
y_smooth = cs(x_smooth)
return ax.plot(x_smooth, y_smooth, **plot_kwargs)[0] | [
"def",
"plot_curvefit",
"(",
"x",
",",
"y",
",",
"fit_type",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"n_pts_smooth",
",",
"fig_params",
"=",
"{",
"}",
",",
"plot_kwargs",
"=",
"{",
"}",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"# Avoid modifying the original arguments.",
"fig_params",
",",
"plot_kwargs",
"=",
"fig_params",
".",
"copy",
"(",
")",
",",
"plot_kwargs",
".",
"copy",
"(",
")",
"fig_params",
".",
"setdefault",
"(",
"'figsize'",
",",
"(",
"12",
",",
"6",
")",
")",
"plot_kwargs",
".",
"setdefault",
"(",
"'linestyle'",
",",
"'-'",
")",
"# Retrieve or create the axes if necessary.",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_params",
")",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth",
"=",
"np",
".",
"linspace",
"(",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
",",
"n_pts",
")",
"if",
"fit_type",
"==",
"'gaussian'",
":",
"y_smooth",
"=",
"gaussian_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
")",
"elif",
"fit_type",
"==",
"'gaussian_filter'",
":",
"sigma",
"=",
"plot_kwargs",
".",
"pop",
"(",
"'sigma'",
",",
"None",
")",
"y_smooth",
"=",
"gaussian_filter_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
",",
"sigma",
"=",
"sigma",
")",
"elif",
"fit_type",
"==",
"'poly'",
":",
"assert",
"'degree'",
"in",
"plot_kwargs",
".",
"keys",
"(",
")",
",",
"\"When plotting a polynomal fit, there must be\"",
"\"a 'degree' entry in the plot_kwargs parameter.\"",
"degree",
"=",
"plot_kwargs",
".",
"pop",
"(",
"'degree'",
")",
"y_smooth",
"=",
"poly_fit",
"(",
"x",
",",
"y",
",",
"degree",
",",
"x_smooth",
")",
"elif",
"fit_type",
"==",
"'cubic_spline'",
":",
"cs",
"=",
"CubicSpline",
"(",
"x",
",",
"y",
")",
"y_smooth",
"=",
"cs",
"(",
"x_smooth",
")",
"return",
"ax",
".",
"plot",
"(",
"x_smooth",
",",
"y_smooth",
",",
"*",
"*",
"plot_kwargs",
")",
"[",
"0",
"]"
] | [
877,
0
] | [
942,
56
] | python | en | ['en', 'error', 'th'] | False |
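A short sketch of calling the deprecated `plot_curvefit` on hypothetical data, assuming the function and its helpers (e.g. `retrieve_or_create_fig_ax`) are importable; the returned Line2D can serve as a legend handle.

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 5, 30)
y = np.exp(-(x - 2.5) ** 2) + np.random.normal(scale=0.05, size=x.shape)

fig, ax = plt.subplots(figsize=(12, 6))
# 'cubic_spline' needs no extra kwargs; 'poly' would need {'degree': ...}.
line = plot_curvefit(x, y, fit_type='cubic_spline', fig=fig, ax=ax)
ax.legend(handles=[line], labels=['cubic spline fit'])
plt.show()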
plot_band | (dataset, figsize=(20, 15), fontsize=24, legend_fontsize=24) |
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS).
Parameters
----------
dataset: xarray.DataArray
An xarray `DataArray` containing time, latitude, and longitude coordinates.
figsize: tuple
A 2-tuple of the figure size in inches for the entire figure.
fontsize: int
The font size to use for text.
legend_fontsize: int
The font size to use for the legend.
|
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS). | def plot_band(dataset, figsize=(20, 15), fontsize=24, legend_fontsize=24):
"""
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS).
Parameters
----------
dataset: xarray.DataArray
An xarray `DataArray` containing time, latitude, and longitude coordinates.
figsize: tuple
A 2-tuple of the figure size in inches for the entire figure.
fontsize: int
The font size to use for text.
legend_fontsize: int
The font size to use for the legend.
"""
# Calculations
times = dataset.time.values
epochs = np.sort(np.array(list(map(n64_to_epoch, times))))
x_locs = (epochs - epochs.min()) / (epochs.max() - epochs.min())
means = dataset.mean(dim=['latitude', 'longitude'], skipna=True).values
medians = dataset.median(dim=['latitude', 'longitude'], skipna=True).values
mask = ~np.isnan(means) & ~np.isnan(medians)
plt.figure(figsize=figsize)
ax = plt.gca()
# Shaded Area (percentiles)
with warnings.catch_warnings():
# Ignore warning about encountering an All-NaN slice. Some acquisitions have all-NaN values.
warnings.simplefilter("ignore", category=RuntimeWarning)
quarter = np.nanpercentile(
dataset.values.reshape((
len(dataset['time']),
len(dataset['latitude']) * len(dataset['longitude']))),
25,
axis=1
)
three_quarters = np.nanpercentile(
dataset.values.reshape((
len(dataset['time']),
len(dataset['latitude']) * len(dataset['longitude']))),
75,
axis=1
)
np.array(quarter)
np.array(three_quarters)
ax.grid(color='lightgray', linestyle='-', linewidth=1)
fillcolor = 'gray'
fillalpha = 0.4
plt.fill_between(x_locs, quarter, three_quarters, interpolate=False, color=fillcolor, alpha=fillalpha,
label="25th and 75th percentile band")
# Medians
plt.plot(x_locs, medians, color="black", marker="o", linestyle='None', label="Medians")
# The Actual Plot
plt.plot(x_locs, means, color="blue", label="Mean")
# Linear Regression (on mean)
m, b = np.polyfit(x_locs[mask], means[mask], 1)
plt.plot(x_locs, m * x_locs + b, '-', color="red", label="linear regression of means", linewidth=3.0)
# Gaussian Curve
plot_curvefit(x_locs[mask], means[mask], fit_type='gaussian', ax=ax,
plot_kwargs=dict(linestyle='-', label="Gaussian smoothed of means",
alpha=1, color='limegreen', linewidth=3.0))
# Formatting
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times[mask])))
ax.grid(color='k', alpha=0.1, linestyle='-', linewidth=1)
ax.xaxis.set_major_formatter(FuncFormatter(tfmt))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=legend_fontsize)
plt.xticks(x_locs, date_strs, rotation=45, fontsize=fontsize)
plt.yticks(fontsize=fontsize)
ax.set_xlabel('Time', fontsize=fontsize)
ax.set_ylabel('Value', fontsize=fontsize)
plt.show() | [
"def",
"plot_band",
"(",
"dataset",
",",
"figsize",
"=",
"(",
"20",
",",
"15",
")",
",",
"fontsize",
"=",
"24",
",",
"legend_fontsize",
"=",
"24",
")",
":",
"# Calculations",
"times",
"=",
"dataset",
".",
"time",
".",
"values",
"epochs",
"=",
"np",
".",
"sort",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"times",
")",
")",
")",
")",
"x_locs",
"=",
"(",
"epochs",
"-",
"epochs",
".",
"min",
"(",
")",
")",
"/",
"(",
"epochs",
".",
"max",
"(",
")",
"-",
"epochs",
".",
"min",
"(",
")",
")",
"means",
"=",
"dataset",
".",
"mean",
"(",
"dim",
"=",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"skipna",
"=",
"True",
")",
".",
"values",
"medians",
"=",
"dataset",
".",
"median",
"(",
"dim",
"=",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"skipna",
"=",
"True",
")",
".",
"values",
"mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"means",
")",
"&",
"~",
"np",
".",
"isnan",
"(",
"medians",
")",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# Shaded Area (percentiles)",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# Ignore warning about encountering an All-NaN slice. Some acquisitions have all-NaN values.",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
"=",
"RuntimeWarning",
")",
"quarter",
"=",
"np",
".",
"nanpercentile",
"(",
"dataset",
".",
"values",
".",
"reshape",
"(",
"(",
"len",
"(",
"dataset",
"[",
"'time'",
"]",
")",
",",
"len",
"(",
"dataset",
"[",
"'latitude'",
"]",
")",
"*",
"len",
"(",
"dataset",
"[",
"'longitude'",
"]",
")",
")",
")",
",",
"25",
",",
"axis",
"=",
"1",
")",
"three_quarters",
"=",
"np",
".",
"nanpercentile",
"(",
"dataset",
".",
"values",
".",
"reshape",
"(",
"(",
"len",
"(",
"dataset",
"[",
"'time'",
"]",
")",
",",
"len",
"(",
"dataset",
"[",
"'latitude'",
"]",
")",
"*",
"len",
"(",
"dataset",
"[",
"'longitude'",
"]",
")",
")",
")",
",",
"75",
",",
"axis",
"=",
"1",
")",
"np",
".",
"array",
"(",
"quarter",
")",
"np",
".",
"array",
"(",
"three_quarters",
")",
"ax",
".",
"grid",
"(",
"color",
"=",
"'lightgray'",
",",
"linestyle",
"=",
"'-'",
",",
"linewidth",
"=",
"1",
")",
"fillcolor",
"=",
"'gray'",
"fillalpha",
"=",
"0.4",
"plt",
".",
"fill_between",
"(",
"x_locs",
",",
"quarter",
",",
"three_quarters",
",",
"interpolate",
"=",
"False",
",",
"color",
"=",
"fillcolor",
",",
"alpha",
"=",
"fillalpha",
",",
"label",
"=",
"\"25th and 75th percentile band\"",
")",
"# Medians",
"plt",
".",
"plot",
"(",
"x_locs",
",",
"medians",
",",
"color",
"=",
"\"black\"",
",",
"marker",
"=",
"\"o\"",
",",
"linestyle",
"=",
"'None'",
",",
"label",
"=",
"\"Medians\"",
")",
"# The Actual Plot",
"plt",
".",
"plot",
"(",
"x_locs",
",",
"means",
",",
"color",
"=",
"\"blue\"",
",",
"label",
"=",
"\"Mean\"",
")",
"# Linear Regression (on mean)",
"m",
",",
"b",
"=",
"np",
".",
"polyfit",
"(",
"x_locs",
"[",
"mask",
"]",
",",
"means",
"[",
"mask",
"]",
",",
"1",
")",
"plt",
".",
"plot",
"(",
"x_locs",
",",
"m",
"*",
"x_locs",
"+",
"b",
",",
"'-'",
",",
"color",
"=",
"\"red\"",
",",
"label",
"=",
"\"linear regression of means\"",
",",
"linewidth",
"=",
"3.0",
")",
"# Gaussian Curve",
"plot_curvefit",
"(",
"x_locs",
"[",
"mask",
"]",
",",
"means",
"[",
"mask",
"]",
",",
"fit_type",
"=",
"'gaussian'",
",",
"ax",
"=",
"ax",
",",
"plot_kwargs",
"=",
"dict",
"(",
"linestyle",
"=",
"'-'",
",",
"label",
"=",
"\"Gaussian smoothed of means\"",
",",
"alpha",
"=",
"1",
",",
"color",
"=",
"'limegreen'",
",",
"linewidth",
"=",
"3.0",
")",
")",
"# Formatting",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"times",
"[",
"mask",
"]",
")",
")",
")",
"ax",
".",
"grid",
"(",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.1",
",",
"linestyle",
"=",
"'-'",
",",
"linewidth",
"=",
"1",
")",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"FuncFormatter",
"(",
"tfmt",
")",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"'center left'",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"0.5",
")",
",",
"fontsize",
"=",
"legend_fontsize",
")",
"plt",
".",
"xticks",
"(",
"x_locs",
",",
"date_strs",
",",
"rotation",
"=",
"45",
",",
"fontsize",
"=",
"fontsize",
")",
"plt",
".",
"yticks",
"(",
"fontsize",
"=",
"fontsize",
")",
"ax",
".",
"set_xlabel",
"(",
"'Time'",
",",
"fontsize",
"=",
"fontsize",
")",
"ax",
".",
"set_ylabel",
"(",
"'Value'",
",",
"fontsize",
"=",
"fontsize",
")",
"plt",
".",
"show",
"(",
")"
] | [
947,
0
] | [
1023,
14
] | python | en | ['en', 'error', 'th'] | False |
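A hedged sketch of `plot_band` on a small synthetic `xarray.DataArray`; the coordinate values are arbitrary and only illustrate the expected time/latitude/longitude layout.

import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range('2018-01-01', periods=6, freq='MS')
da = xr.DataArray(np.random.rand(6, 4, 4),
                  dims=['time', 'latitude', 'longitude'],
                  coords={'time': times,
                          'latitude': np.linspace(-1.0, -0.7, 4),
                          'longitude': np.linspace(35.0, 35.3, 4)})
plot_band(da, figsize=(20, 15), fontsize=18, legend_fontsize=18)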
convert_name_rgb_255 | (color) |
Converts a name of a matplotlib color to a list of rgb values in the range [0,255].
Else, returns the original argument.
Parameters
----------
color: str
The color name to convert to an rgb list.
|
Converts a name of a matplotlib color to a list of rgb values in the range [0,255].
Else, returns the original argument. | def convert_name_rgb_255(color):
"""
Converts a name of a matplotlib color to a list of rgb values in the range [0,255].
Else, returns the original argument.
Parameters
----------
color: str
The color name to convert to an rgb list.
"""
return [int(255 * rgb) for rgb in mpl.colors.to_rgb(color)] if isinstance(color, str) else color | [
"def",
"convert_name_rgb_255",
"(",
"color",
")",
":",
"return",
"[",
"int",
"(",
"255",
"*",
"rgb",
")",
"for",
"rgb",
"in",
"mpl",
".",
"colors",
".",
"to_rgb",
"(",
"color",
")",
"]",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color"
] | [
1028,
0
] | [
1038,
100
] | python | en | ['en', 'error', 'th'] | False |
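Expected behavior, assuming standard matplotlib color names:

convert_name_rgb_255('red')          # -> [255, 0, 0]
convert_name_rgb_255((0, 128, 255))  # non-string input is returned unchanged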
convert_name_rgba_255 | (color) |
Converts a name of a matplotlib color to a list of rgba values in the range [0,255].
Else, returns the original argument.
Parameters
----------
color: str
The color name to convert to an rgba list.
|
Converts a name of a matplotlib color to a list of rgba values in the range [0,255].
Else, returns the original argument. | def convert_name_rgba_255(color):
"""
Converts a name of a matplotlib color to a list of rgba values in the range [0,255].
Else, returns the original argument.
Parameters
----------
color: str
The color name to convert to an rgba list.
"""
return [*convert_name_rgb_255(color), 255] if isinstance(color, str) else color | [
"def",
"convert_name_rgba_255",
"(",
"color",
")",
":",
"return",
"[",
"*",
"convert_name_rgb_255",
"(",
"color",
")",
",",
"255",
"]",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color"
] | [
1041,
0
] | [
1051,
83
] | python | en | ['en', 'error', 'th'] | False |
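Expected behavior, again assuming standard matplotlib color names:

convert_name_rgba_255('blue')             # -> [0, 0, 255, 255]
convert_name_rgba_255([10, 20, 30, 255])  # non-string input is returned unchanged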
norm_color | (color) |
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1].
Parameters
----------
color: str or list-like of numeric
The name of a matplotlib color or a list-like of rgb values in the range [0,255].
|
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1]. | def norm_color(color):
"""
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1].
Parameters
----------
color: str or list-like of numeric
The name of a matplotlib color or a list-like of rgb values in the range [0,255].
"""
color = convert_name_rgb_255(color)
if len(color) == 3:
color = [rgb / 255 for rgb in color]
return color | [
"def",
"norm_color",
"(",
"color",
")",
":",
"color",
"=",
"convert_name_rgb_255",
"(",
"color",
")",
"if",
"len",
"(",
"color",
")",
"==",
"3",
":",
"color",
"=",
"[",
"rgb",
"/",
"255",
"for",
"rgb",
"in",
"color",
"]",
"return",
"color"
] | [
1054,
0
] | [
1067,
16
] | python | en | ['en', 'error', 'th'] | False |
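A quick sketch of the three input forms `norm_color` accepts:

norm_color('red')                 # -> [1.0, 0.0, 0.0]
norm_color((255, 128, 0))         # -> [1.0, ~0.502, 0.0]
norm_color((0.1, 0.2, 0.3, 1.0))  # length-4 rgba is returned unchanged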
create_discrete_color_map | (data_range=None, colors=None, cmap=None,
th=None, pts=None, cmap_name='my_cmap',
data_range_fmt=None, pts_fmt=None) |
Creates a discrete matplotlib LinearSegmentedColormap with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both.
Parameters
----------
data_range: list
A 2-tuple of the minimum and maximum values the data may take.
Can be omitted if `pts` is specified as a list-like of points.
colors: list-like
Colors to use between thresholds specified in `th` or around points specified in `pts`.
Colors can be string names of matplotlib colors, 3-tuples of rgb values in range [0,255],
or 4-tuples of rgba values in range [0,1].
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color data in the regions between thresholds
specified in `th` or around points specified in `pts`.
th: list-like of float
Threshold values separating colors, so `len(colors) == len(th)+1`.
Must be in the range of `data_range` - noninclusive.
pts: int or list-like of float
Points around which to color the same. This can be either an integer
specifying the number of evenly-spaced points to use or a list-like of points,
in which case values must be in the range of `data_range` - inclusive.
The thresholds used will be the midpoints between points in `pts`.
cmap_name: str
The name of the created colormap for matplotlib.
data_range_fmt: list-like of size 2
A mutable container intended to hold values used to set vmin and vmax, respectively, of
`pyplot.imshow()` for the purpose of formatting a colorbar. Only useful if `pts` is
specified as a list-like.
pts_fmt: list-like
A mutable container intended to hold the midpoints of the thresholds. This must have the same length
as the number of points specified by `pts` or have a length of `len(th)+1`.
:Authors:
John Rattz ([email protected])
|
Creates a discrete matplotlib LinearSegmentedColormap with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both. | def create_discrete_color_map(data_range=None, colors=None, cmap=None,
th=None, pts=None, cmap_name='my_cmap',
data_range_fmt=None, pts_fmt=None):
"""
Creates a discrete matplotlib LinearSegmentedColormap with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both.
Parameters
----------
data_range: list
A 2-tuple of the minimum and maximum values the data may take.
Can be omitted if `pts` is specified as a list-like of points.
colors: list-like
Colors to use between thresholds specified in `th` or around points specified in `pts`.
Colors can be string names of matplotlib colors, 3-tuples of rgb values in range [0,255],
or 4-tuples of rgba values in range [0,1].
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color data in the regions between thresholds
specified in `th` or around points specified in `pts`.
th: list-like of float
Threshold values separating colors, so `len(colors) == len(th)+1`.
Must be in the range of `data_range` - noninclusive.
pts: int or list-like of float
Points around which to color the same. This can be either an integer
specifying the number of evenly-spaced points to use or a list-like of points,
in which case values must be in the range of `data_range` - inclusive.
The thresholds used will be the midpoints between points in `pts`.
cmap_name: str
The name of the created colormap for matplotlib.
data_range_fmt: list-like of size 2
A mutable container intended to hold values used to set vmin and vmax, respectively, of
`pyplot.imshow()` for the purpose of formatting a colorbar. Only useful if `pts` is
specified as a list-like.
pts_fmt: list-like
A mutable container intended to hold the midpoints of the thresholds. This must have the same length
as the number of points specified by `pts` or have a length of `len(th)+1`.
:Authors:
John Rattz ([email protected])
"""
assert (colors is None) ^ (cmap is None), \
"Exclusively either `colors` or `cmap` must be specified."
assert th is None or pts is None, \
"The parameters `th` or `pts` may be specified, but not both."
cmap = plt.get_cmap(cmap) if isinstance(cmap, str) else cmap
if th is None: # If `th` is not supplied, construct it based on other arguments.
if pts is not None:
if isinstance(pts, int): # Use `pts` as the number of evenly-spaced points.
assert pts > 0, "The number of points specified by `pts` must be positive."
th_spacing = (data_range[1] - data_range[0]) / pts
th = np.linspace(data_range[0] + th_spacing, data_range[1] - th_spacing, pts - 1)
else: # Use `pts` as a list-like of points to put thresholds between.
assert data_range[0] <= min(pts) and max(pts) <= data_range[1], \
"The values in `pts` must be within `data_range`, inclusive."
assert len(pts) > 0, "The parameter `pts` is a list, but it has no elements. " \
"Please ensure it has one or more numeric elements."
if len(pts) == 1:
th = []
elif len(pts) > 1:
# Choose imaginary lower and upper bounds of the data to scale `pts` with
# so that the first and last color regions are sized appropriately.
data_range_fmt = [None] * 2 if data_range_fmt is None else data_range_fmt
data_range_fmt[0] = pts[0] - (pts[1] - pts[0]) / 2
data_range_fmt[1] = pts[-1] + (pts[-1] - pts[-2]) / 2
pts = np.interp(pts, data_range_fmt, data_range) # (0,1))
th = [pts[ind - 1] + (pts[ind] - pts[ind - 1]) / 2 for ind in range(1, len(pts))]
else:
assert colors is not None, \
"If neither `th` nor `pts` are specified, `colors` must be specified."
th_spacing = (data_range[1] - data_range[0]) / len(colors)
th = np.linspace(data_range[0] + th_spacing, data_range[1] - th_spacing, len(colors) - 1)
else:
assert len(th) == 0 or (data_range[0] < min(th) and max(th) < data_range[1]), \
"The values in `th` must be within `data_range`, exclusive."
# Normalize threshold values based on the data range.
th = [(val - data_range[0]) / (data_range[1] - data_range[0]) for val in th]
th = np.interp(th, data_range, (0, 1))
th = [0.0] + list(th) + [1.0]
if pts_fmt is not None:
for ind in range(len(th) - 1):
pts_fmt[ind] = th[ind] + (th[ind + 1] - th[ind]) / 2
if colors is None: # If `colors` is not supplied, construct it based on other arguments.
assert cmap is not None, \
"If `colors` is not specified, `cmap` must be specified."
colors = [cmap(th[ind - 1] + (th[ind] - th[ind - 1]) / 2) for ind in range(1, len(th))]
else:
colors = list(map(norm_color, colors))
cdict = {}
# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.
primary_colors = ['red', 'green', 'blue']
# Get the 3-tuples of rgb values for the colors.
color_rgbs = [(mpl.colors.to_rgb(color) if isinstance(color, str) else color) for color in colors]
# For each color entry to go into the color dictionary...
for primary_color_ind, primary_color in enumerate(primary_colors):
cdict_entry = [None] * len(th)
# For each threshold (as well as 0.0 and 1.0), specify the values for this primary color.
for row_ind, th_ind in enumerate(range(len(th))):
# Get the two colors that this threshold corresponds to.
th_color_inds = [0, 0] if th_ind == 0 else \
[len(colors) - 1, len(colors) - 1] if th_ind == len(th) - 1 else \
[th_ind - 1, th_ind]
primary_color_vals = [color_rgbs[th_color_ind][primary_color_ind] for th_color_ind in th_color_inds]
cdict_entry[row_ind] = (th[th_ind],) + tuple(primary_color_vals)
cdict[primary_color] = cdict_entry
cmap = LinearSegmentedColormap(cmap_name, cdict)
return cmap | [
"def",
"create_discrete_color_map",
"(",
"data_range",
"=",
"None",
",",
"colors",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"th",
"=",
"None",
",",
"pts",
"=",
"None",
",",
"cmap_name",
"=",
"'my_cmap'",
",",
"data_range_fmt",
"=",
"None",
",",
"pts_fmt",
"=",
"None",
")",
":",
"assert",
"(",
"colors",
"is",
"None",
")",
"^",
"(",
"cmap",
"is",
"None",
")",
",",
"\"Exclusively either `colors` or `cmap` must be specified.\"",
"assert",
"th",
"is",
"None",
"or",
"pts",
"is",
"None",
",",
"\"The parameters `th` or `pts` may be specified, but not both.\"",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"cmap",
")",
"if",
"isinstance",
"(",
"cmap",
",",
"str",
")",
"else",
"cmap",
"if",
"th",
"is",
"None",
":",
"# If `th` is not supplied, construct it based on other arguments.",
"if",
"pts",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"pts",
",",
"int",
")",
":",
"# Use `pts` as the number of evenly-spaced points.",
"assert",
"pts",
">",
"0",
",",
"\"The number of points specified by `pts` must be positive.\"",
"th_spacing",
"=",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"pts",
"th",
"=",
"np",
".",
"linspace",
"(",
"data_range",
"[",
"0",
"]",
"+",
"th_spacing",
",",
"data_range",
"[",
"1",
"]",
"-",
"th_spacing",
",",
"pts",
"-",
"1",
")",
"else",
":",
"# Use `pts` as a list-like of points to put thresholds between.",
"assert",
"data_range",
"[",
"0",
"]",
"<=",
"min",
"(",
"pts",
")",
"and",
"max",
"(",
"pts",
")",
"<=",
"data_range",
"[",
"1",
"]",
",",
"\"The values in `pts` must be within `data_range`, inclusive.\"",
"assert",
"len",
"(",
"pts",
")",
">",
"0",
",",
"\"The parameter `pts` is a list, but it has no elements. \"",
"\"Please ensure it has one or more numeric elements.\"",
"if",
"len",
"(",
"pts",
")",
"==",
"1",
":",
"th",
"=",
"[",
"]",
"elif",
"len",
"(",
"pts",
")",
">",
"1",
":",
"# Choose imaginary lower and upper bounds of the data to scale `pts` with",
"# so that the first and last color regions are sized appropriately.",
"data_range_fmt",
"=",
"[",
"None",
"]",
"*",
"2",
"if",
"data_range_fmt",
"is",
"None",
"else",
"data_range_fmt",
"data_range_fmt",
"[",
"0",
"]",
"=",
"pts",
"[",
"0",
"]",
"-",
"(",
"pts",
"[",
"1",
"]",
"-",
"pts",
"[",
"0",
"]",
")",
"/",
"2",
"data_range_fmt",
"[",
"1",
"]",
"=",
"pts",
"[",
"-",
"1",
"]",
"+",
"(",
"pts",
"[",
"-",
"1",
"]",
"-",
"pts",
"[",
"-",
"2",
"]",
")",
"/",
"2",
"pts",
"=",
"np",
".",
"interp",
"(",
"pts",
",",
"data_range_fmt",
",",
"data_range",
")",
"# (0,1))",
"th",
"=",
"[",
"pts",
"[",
"ind",
"-",
"1",
"]",
"+",
"(",
"pts",
"[",
"ind",
"]",
"-",
"pts",
"[",
"ind",
"-",
"1",
"]",
")",
"/",
"2",
"for",
"ind",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"pts",
")",
")",
"]",
"else",
":",
"assert",
"colors",
"is",
"not",
"None",
",",
"\"If neither `th` nor `pts` are specified, `colors` must be specified.\"",
"th_spacing",
"=",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"len",
"(",
"colors",
")",
"th",
"=",
"np",
".",
"linspace",
"(",
"data_range",
"[",
"0",
"]",
"+",
"th_spacing",
",",
"data_range",
"[",
"1",
"]",
"-",
"th_spacing",
",",
"len",
"(",
"colors",
")",
"-",
"1",
")",
"else",
":",
"assert",
"len",
"(",
"th",
")",
"==",
"0",
"or",
"(",
"data_range",
"[",
"0",
"]",
"<",
"min",
"(",
"th",
")",
"and",
"max",
"(",
"th",
")",
"<",
"data_range",
"[",
"1",
"]",
")",
",",
"\"The values in `th` must be within `data_range`, exclusive.\"",
"# Normalize threshold values based on the data range.",
"th",
"=",
"[",
"(",
"val",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
"for",
"val",
"in",
"th",
"]",
"th",
"=",
"np",
".",
"interp",
"(",
"th",
",",
"data_range",
",",
"(",
"0",
",",
"1",
")",
")",
"th",
"=",
"[",
"0.0",
"]",
"+",
"list",
"(",
"th",
")",
"+",
"[",
"1.0",
"]",
"if",
"pts_fmt",
"is",
"not",
"None",
":",
"for",
"ind",
"in",
"range",
"(",
"len",
"(",
"th",
")",
"-",
"1",
")",
":",
"pts_fmt",
"[",
"ind",
"]",
"=",
"th",
"[",
"ind",
"]",
"+",
"(",
"th",
"[",
"ind",
"+",
"1",
"]",
"-",
"th",
"[",
"ind",
"]",
")",
"/",
"2",
"if",
"colors",
"is",
"None",
":",
"# If `colors` is not supplied, construct it based on other arguments.",
"assert",
"cmap",
"is",
"not",
"None",
",",
"\"If `colors` is not specified, `cmap` must be specified.\"",
"colors",
"=",
"[",
"cmap",
"(",
"th",
"[",
"ind",
"-",
"1",
"]",
"+",
"(",
"th",
"[",
"ind",
"]",
"-",
"th",
"[",
"ind",
"-",
"1",
"]",
")",
"/",
"2",
")",
"for",
"ind",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"th",
")",
")",
"]",
"else",
":",
"colors",
"=",
"list",
"(",
"map",
"(",
"norm_color",
",",
"colors",
")",
")",
"cdict",
"=",
"{",
"}",
"# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.",
"primary_colors",
"=",
"[",
"'red'",
",",
"'green'",
",",
"'blue'",
"]",
"# Get the 3-tuples of rgb values for the colors.",
"color_rgbs",
"=",
"[",
"(",
"mpl",
".",
"colors",
".",
"to_rgb",
"(",
"color",
")",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color",
")",
"for",
"color",
"in",
"colors",
"]",
"# For each color entry to go into the color dictionary...",
"for",
"primary_color_ind",
",",
"primary_color",
"in",
"enumerate",
"(",
"primary_colors",
")",
":",
"cdict_entry",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"th",
")",
"# For each threshold (as well as 0.0 and 1.0), specify the values for this primary color.",
"for",
"row_ind",
",",
"th_ind",
"in",
"enumerate",
"(",
"range",
"(",
"len",
"(",
"th",
")",
")",
")",
":",
"# Get the two colors that this threshold corresponds to.",
"th_color_inds",
"=",
"[",
"0",
",",
"0",
"]",
"if",
"th_ind",
"==",
"0",
"else",
"[",
"len",
"(",
"colors",
")",
"-",
"1",
",",
"len",
"(",
"colors",
")",
"-",
"1",
"]",
"if",
"th_ind",
"==",
"len",
"(",
"th",
")",
"-",
"1",
"else",
"[",
"th_ind",
"-",
"1",
",",
"th_ind",
"]",
"primary_color_vals",
"=",
"[",
"color_rgbs",
"[",
"th_color_ind",
"]",
"[",
"primary_color_ind",
"]",
"for",
"th_color_ind",
"in",
"th_color_inds",
"]",
"cdict_entry",
"[",
"row_ind",
"]",
"=",
"(",
"th",
"[",
"th_ind",
"]",
",",
")",
"+",
"tuple",
"(",
"primary_color_vals",
")",
"cdict",
"[",
"primary_color",
"]",
"=",
"cdict_entry",
"cmap",
"=",
"LinearSegmentedColormap",
"(",
"cmap_name",
",",
"cdict",
")",
"return",
"cmap"
] | [
1074,
0
] | [
1184,
15
] | python | en | ['en', 'error', 'th'] | False |
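A minimal sketch of building and applying a discrete colormap with explicit thresholds; the data here is random and only for illustration.

import numpy as np
import matplotlib.pyplot as plt

# Three color regions over data in [0, 1], split at 0.25 and 0.75.
cmap = create_discrete_color_map(data_range=(0, 1),
                                 colors=['red', 'yellow', 'green'],
                                 th=[0.25, 0.75])
plt.imshow(np.random.rand(8, 8), cmap=cmap, vmin=0, vmax=1)
plt.colorbar()
plt.show()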
create_gradient_color_map | (data_range, colors, positions=None, cmap_name='my_cmap') |
Creates a gradient colormap with a LinearSegmentedColormap. Currently only creates linear gradients.
Parameters
----------
data_range: list-like
A 2-tuple of the minimum and maximum values the data may take.
colors: list of str or list of tuple
Colors can be string names of matplotlib colors or 3-tuples of rgb values in range [0,255].
The first and last colors are placed at the beginning and end of the colormap, respectively.
positions: list-like
The values which are colored with corresponding colors in `colors`,
except the first and last colors, so `len(positions) == len(colors)-2`.
Positions must be in the range of `data_range` - noninclusive.
If no positions are provided, the colors are evenly spaced.
cmap_name: str
The name of the created colormap for matplotlib.
Examples
--------
Creating a linear gradient colormap of red, green, and blue, with even spacing between them:
create_gradient_color_map(data_range=(0,1), positions=(0.5,), colors=('red', 'green', 'blue'))
Which can also be done without specifying `positions`:
create_gradient_color_map(data_range=(0,1), colors=('red', 'green', 'blue'))
|
Creates a gradient colormap with a LinearSegmentedColormap. Currently only creates linear gradients. | def create_gradient_color_map(data_range, colors, positions=None, cmap_name='my_cmap'):
"""
Creates a gradient colormap with a LinearSegmentedColormap. Currently only creates linear gradients.
Parameters
----------
data_range: list-like
A 2-tuple of the minimum and maximum values the data may take.
colors: list of str or list of tuple
Colors can be string names of matplotlib colors or 3-tuples of rgb values in range [0,255].
The first and last colors are placed at the beginning and end of the colormap, respectively.
positions: list-like
The values which are colored with corresponding colors in `colors`,
except the first and last colors, so `len(positions) == len(colors)-2`.
Positions must be in the range of `data_range` - noninclusive.
If no positions are provided, the colors are evenly spaced.
cmap_name: str
The name of the created colormap for matplotlib.
Examples
--------
Creating a linear gradient colormap of red, green, and blue, with even spacing between them:
create_gradient_color_map(data_range=(0,1), positions=(0.5,), colors=('red', 'green', 'blue'))
Which can also be done without specifying `positions`:
create_gradient_color_map(data_range=(0,1), colors=('red', 'green', 'blue'))
"""
# Normalize position values based on the data range.
if positions is None:
range_size = data_range[1] - data_range[0]
spacing = range_size / (len(colors) - 1)
positions = [spacing * i for i in range(1, len(colors) - 1)]
else:
positions = list(map(lambda val: (val - data_range[0]) / (data_range[1] - data_range[0]), positions))
colors = list(map(norm_color, colors)) # Normalize color values for colormap creation.
positions = [0.0] + positions + [1.0]
cdict = {}
# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.
primary_colors = ['red', 'green', 'blue']
# Get the 3-tuples of rgb values for the colors.
color_rgbs = [(mpl.colors.to_rgb(color) if isinstance(color, str) else color) for color in colors]
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(positions, color_rgbs):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
return LinearSegmentedColormap(cmap_name, cdict) | [
"def",
"create_gradient_color_map",
"(",
"data_range",
",",
"colors",
",",
"positions",
"=",
"None",
",",
"cmap_name",
"=",
"'my_cmap'",
")",
":",
"# Normalize position values based on the data range.",
"if",
"positions",
"is",
"None",
":",
"range_size",
"=",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
"spacing",
"=",
"range_size",
"/",
"(",
"len",
"(",
"colors",
")",
"-",
"1",
")",
"positions",
"=",
"[",
"spacing",
"*",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"colors",
")",
"-",
"1",
")",
"]",
"else",
":",
"positions",
"=",
"list",
"(",
"map",
"(",
"lambda",
"val",
":",
"(",
"val",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
",",
"positions",
")",
")",
"colors",
"=",
"list",
"(",
"map",
"(",
"norm_color",
",",
"colors",
")",
")",
"# Normalize color values for colormap creation.",
"positions",
"=",
"[",
"0.0",
"]",
"+",
"positions",
"+",
"[",
"1.0",
"]",
"cdict",
"=",
"{",
"}",
"# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.",
"primary_colors",
"=",
"[",
"'red'",
",",
"'green'",
",",
"'blue'",
"]",
"# Get the 3-tuples of rgb values for the colors.",
"color_rgbs",
"=",
"[",
"(",
"mpl",
".",
"colors",
".",
"to_rgb",
"(",
"color",
")",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color",
")",
"for",
"color",
"in",
"colors",
"]",
"cdict",
"=",
"{",
"'red'",
":",
"[",
"]",
",",
"'green'",
":",
"[",
"]",
",",
"'blue'",
":",
"[",
"]",
"}",
"for",
"pos",
",",
"color",
"in",
"zip",
"(",
"positions",
",",
"color_rgbs",
")",
":",
"cdict",
"[",
"'red'",
"]",
".",
"append",
"(",
"(",
"pos",
",",
"color",
"[",
"0",
"]",
",",
"color",
"[",
"0",
"]",
")",
")",
"cdict",
"[",
"'green'",
"]",
".",
"append",
"(",
"(",
"pos",
",",
"color",
"[",
"1",
"]",
",",
"color",
"[",
"1",
"]",
")",
")",
"cdict",
"[",
"'blue'",
"]",
".",
"append",
"(",
"(",
"pos",
",",
"color",
"[",
"2",
"]",
",",
"color",
"[",
"2",
"]",
")",
")",
"return",
"LinearSegmentedColormap",
"(",
"cmap_name",
",",
"cdict",
")"
] | [
1187,
0
] | [
1234,
52
] | python | en | ['en', 'error', 'th'] | False |
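A minimal sketch rendering the evenly spaced red-green-blue gradient from the docstring example:

import numpy as np
import matplotlib.pyplot as plt

cmap = create_gradient_color_map(data_range=(0, 1),
                                 colors=('red', 'green', 'blue'))
gradient = np.linspace(0, 1, 256).reshape(1, -1)
plt.imshow(gradient, aspect='auto', cmap=cmap)
plt.show()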
binary_class_change_plot | (dataarrays, clean_masks=None, x_coord='longitude', y_coord='latitude',
colors=None, override_mask=None, override_color=None,
neg_trans=False, pos_trans=False,
class_legend_label=None, width=10, fig=None, ax=None,
fig_kwargs={}, title_kwargs={}, imshow_kwargs={},
x_label_kwargs={}, y_label_kwargs={}, legend_kwargs={},
create_stats_table=True, create_change_matrix=True,
denoise=True, denoise_params=None) |
Creates a figure showing one of the following, depending on the format of arguments:
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some).
Parameters
----------
dataarrays: list-like of xarray.DataArray
A list-like of one or two DataArrays of classification values
to plot, which must be either 0 or 1.
clean_masks: list-like of xarray.DataArray
A list-like of one or two DataArrays of boolean values denoting
where the `xarray.DataArray` objects in `dataarrays` are considered
clean. Any non-clean values in `dataarrays` will be ignored.
If specified, every entry in `dataarrays` must have a corresponding entry in `clean_masks`.
If this argument is not supplied (i.e. is `None`), all values will be
considered to be clean.
x_coord, y_coord: str
Names of the x and y coordinates in the elements of `dataarrays` to use
as tick and axis labels.
colors: list-like
A list-like of matplotlib colors - whether string names of
matplotlib colors (like 'red'), or list-likes of rgba values in range [0,255].
If `dataarrays` contains one DataArray, provide 3 color entries -
for never, sometimes, and always class membership, in that order.
If `dataarrays` contains two DataArrays, provide 4 color entries -
for transitions between never and sometimes/always class membership
between the two time periods. These transitions are, in order,
(never,never),(never,some),(some,never),(some,some).
override_mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays.
The pixels for which it is `True` are colored `override_color`.
override_color: str or list of rgba values
The color to use for `override_mask`. Can be a string name of a matplotlib color
or a list-like of rgba values (not rgb). By default, it is transparency.
neg_trans: bool
Whether to make pixels that are never a member of the class transparent.
pos_trans: bool
Whether to make pixels that are always a member of the class transparent.
class_legend_label: str
The class label on the legend. For example, `class_legend_label='Water'` would yield
legend labels like "Never Water".
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
title_kwargs: dict
The dictionary of keyword arguments used to format the title.
Passed to `matplotlib.axes.Axes.set_title()`.
Set the title text with a 'label' keyword argument.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `ax.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
legend_kwargs: dict
The dictionary of keyword arguments passed to `ax.legend()`.
create_stats_table: bool
Whether to create a table of statistics showing the number and percent
of pixels in each category of membership.
create_change_matrix: bool
Whether to create a 3x3 change matrix showing the number and percent of pixels
that experience each possible transition between never, sometimes, and always
a member of the class between the baseline and analysis time periods.
Only considered if `len(dataarrays) == 2`.
denoise: bool
Whether to denoise the output image.
denoise_params: dict
Dictionary of keyword arguments for
`utils.data_cube_utilites.raster_filter.lone_object_filter()`.
See that function's docstring for information about its parameters.
Returns
-------
(fig,ax): tuple
A 2-tuple of the figure and axes used to create the figure.
stats: tuple
Only returned if `create_stats_table == True` or `create_change_matrix == True`.
If `create_stats_table == True`, `stats` includes a `pandas.DataFrame` containing
the number and percent of pixels in each category of membership,
with the categories depending on whether `dataarrays` contains one or two DataArrays.
* If `dataarrays` contains one DataArray, there are 4 rows for never, sometimes,
always, and unknown (due to `clean_masks`) class membership.
* If `dataarrays` contains two DataArrays, there are 6 rows for the transitions
(never,never), (never,some), (some,never), (some,some), the net change
((never,some) + (some,never)), and unknown.
If `len(dataarrays) == 2 and create_change_matrix == True`, `stats` includes
an `xarray.Dataset` containing the number and percent of pixels in each possible
transition between never, sometimes, and always a member of the class between
the baseline and analysis time periods. The number and percent are each a
data variable of the `xarray.Dataset`.
If a stats table and a change matrix are both created, they will be returned in that order.
:Authors:
John Rattz ([email protected])
|
Creates a figure showing one of the following, depending on the format of arguments:
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some). | def binary_class_change_plot(dataarrays, clean_masks=None, x_coord='longitude', y_coord='latitude',
colors=None, override_mask=None, override_color=None,
neg_trans=False, pos_trans=False,
class_legend_label=None, width=10, fig=None, ax=None,
fig_kwargs={}, title_kwargs={}, imshow_kwargs={},
x_label_kwargs={}, y_label_kwargs={}, legend_kwargs={},
create_stats_table=True, create_change_matrix=True,
denoise=True, denoise_params=None):
"""
Creates a figure showing one of the following, depending on the format of arguments:
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some).
Parameters
----------
dataarrays: list-like of xarray.DataArray
A list-like of one or two DataArrays of classification values
to plot, which must be either 0 or 1.
clean_masks: list-like of xarray.DataArray
A list-like of one or two DataArrays of boolean values denoting
where the `xarray.DataArray` objects in `dataarrays` are considered
clean. Any non-clean values in `dataarrays` will be ignored.
If specified, every entry in `dataarrays` must have a corresponding entry in `clean_masks`.
If this argument is not supplied (i.e. is `None`), all values will be
considered to be clean.
x_coord, y_coord: str
Names of the x and y coordinates in the elements of `dataarrays` to use
as tick and axis labels.
colors: list-like
A list-like of matplotlib colors - whether string names of
matplotlib colors (like 'red'), or list-likes of rgba values in range [0,255].
If `dataarrays` contains one DataArray, provide 3 color entries -
for never, sometimes, and always class membership, in that order.
If `dataarrays` contains two DataArrays, provide 4 color entries -
for transitions between never and sometimes/always class membership
between the two time periods. These transitions are, in order,
(never,never),(never,some),(some,never),(some,some).
override_mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays.
The pixels for which it is `True` are colored `override_color`.
override_color: str or list of rgba values
The color to use for `override_mask`. Can be a string name of a matplotlib color
or a list-like of rgba values (not rgb). By default, it is transparency.
neg_trans: bool
Whether to make pixels that are never a member of the class transparent.
pos_trans: bool
Whether to make pixels that are always a member of the class transparent.
class_legend_label: str
The class label on the legend. For example, `class_legend_label='Water'` would yield
legend labels like "Never Water".
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
title_kwargs: dict
The dictionary of keyword arguments used to format the title.
Passed to `matplotlib.axes.Axes.set_title()`.
Set the title text with a 'label' keyword argument.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `ax.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
legend_kwargs: dict
The dictionary of keyword arguments passed to `ax.legend()`.
create_stats_table: bool
Whether to create a table of statistics showing the number and percent
of pixels in each category of membership.
create_change_matrix: bool
Whether to create a 3x3 change matrix showing the number and percent of pixels
that experience each possible transition between never, sometimes, and always
a member of the class between the baseline and analysis time periods.
Only considered if `len(dataarrays) == 2`.
denoise: bool
Whether to denoise the output image.
denoise_params: dict
Dictionary of keyword arguments for
`utils.data_cube_utilites.raster_filter.lone_object_filter()`.
See that function's docstring for information about its parameters.
Returns
-------
(fig,ax): tuple
A 2-tuple of the figure and axes used to create the figure.
stats: tuple
Only returned if `create_stats_table == True` or `create_change_matrix == True`.
If `create_stats_table == True`, `stats` includes a `pandas.DataFrame` containing
the number and percent of pixels in each category of membership,
with the categories depending on whether `dataarrays` contains one or two DataArrays.
* If `dataarrays` contains one DataArray, there are 4 rows for never, sometimes,
always, and unknown (due to `clean_masks`) class membership.
* If `dataarrays` contains two DataArrays, there are 6 rows for the transitions
(never,never), (never,some), (some,never), (some,some), the net change
((never,some) + (some,never)), and unknown.
If `len(dataarrays) == 2 and create_change_matrix == True`, `stats` includes
an `xarray.Dataset` containing the number and percent of pixels in each possible
transition between never, sometimes, and always a member of the class between
the baseline and analysis time periods. The number and percent are each a
data variable of the `xarray.Dataset`.
If a stats table and a change matrix are both created, they will be returned in that order.
:Authors:
John Rattz ([email protected])
"""
if clean_masks is None:
clean_masks = [xr.DataArray(np.ones(dataarray.shape, dtype=np.bool),
coords=dataarray.coords, dims=dataarray.dims)
for dataarray in dataarrays]
denoise_params = {} if denoise_params is None and denoise else \
denoise_params
# Avoid modifying the original arguments.
fig_kwargs, title_kwargs, legend_kwargs = \
fig_kwargs.copy(), title_kwargs.copy(), legend_kwargs.copy()
# Handle conversion of matplotlib color names to lists of rgb values (range [0,255] for plt.imshow()).
colors = list(map(convert_name_rgba_255, colors))
override_color = convert_name_rgba_255(override_color) if override_color is not None else [0, 0, 0, 0]
def get_none_chng_perm_masks(dataarray, clean_mask, time_dim='time'):
"""
For a DataArray of binary classifications (0 or 1) with a time dimension,
get a list of masks indicating where the points are, in order, never, sometimes,
or always a member of the class (1 indicates membership), considering only
non-NaN values for those points.
"""
time_axis = dataarray.get_axis_num(time_dim)
# Get the mean classification across time.
masked_da = np.ma.array(dataarray.values, mask=~clean_mask.values)
frac_cls = masked_da.mean(axis=time_axis)
# Find where pixels are permanent, changing, or never a member of the class.
none_mask = (frac_cls == 0).filled(False)
chng_mask = (0 < frac_cls).filled(False) & (frac_cls < 1).filled(False)
perm_mask = (1 == frac_cls).filled(False)
return [none_mask, chng_mask, perm_mask]
# Assemble the color masks.
masks = []
if len(dataarrays) == 1: # Determine extent change in one time period.
dataarray = dataarrays[0]
clean_mask = clean_masks[0]
masks += get_none_chng_perm_masks(dataarray, clean_mask)
else: # Determine change between two time periods.
baseline_da, analysis_da = dataarrays
baseline_clean_mask = clean_masks[0] if clean_masks is not None else None
analysis_clean_mask = clean_masks[1] if clean_masks is not None else None
baseline_none_mask, baseline_chng_mask, baseline_perm_mask = get_none_chng_perm_masks(baseline_da,
baseline_clean_mask)
analysis_none_mask, analysis_chng_mask, analysis_perm_mask = get_none_chng_perm_masks(analysis_da,
analysis_clean_mask)
# Find where points are never a member of the class or are a member at one or more times.
baseline_cls_ever = baseline_chng_mask | baseline_perm_mask
analysis_cls_ever = analysis_chng_mask | analysis_perm_mask
# Find where points change between never being a member of the class
# and being a member at one or more times between the two periods.
no_cls_no_cls_mask = baseline_none_mask & analysis_none_mask
no_cls_cls_mask = baseline_none_mask & analysis_cls_ever
cls_no_cls_mask = baseline_cls_ever & analysis_none_mask
cls_cls_mask = baseline_cls_ever & analysis_cls_ever
masks += [no_cls_no_cls_mask, no_cls_cls_mask, cls_no_cls_mask, cls_cls_mask]
# Determine the overriding mask.
y_x_shape = len(dataarrays[0][y_coord]), len(dataarrays[0][x_coord])
override_mask = np.zeros(y_x_shape, dtype=np.bool) if override_mask is None else override_mask
# Create an array of integer-encoded change-class values.
cls_cng_arr = np.zeros(y_x_shape, dtype=np.uint8)
for i, mask in enumerate(masks):
cls_cng_arr[mask] = i
# Denoise the class change image (optional).
if denoise:
cls_cng_arr = lone_object_filter(cls_cng_arr, **denoise_params)
# Color the image with the masks.
# Initialize pixels as white.
transparency_mask = np.zeros(y_x_shape, dtype=np.bool)
color_array = np.full((*y_x_shape, 4), 255, dtype=np.uint8)
for i in range(len(masks)):
if (neg_trans and i == 0) or (pos_trans and i == len(masks) - 1):
transparency_mask[cls_cng_arr == i] = True
color_array[cls_cng_arr == i] = colors[i]
if neg_trans or pos_trans:
color_array[transparency_mask] = [0, 0, 0, 0]
color_array[override_mask] = override_color
fig_kwargs['figsize'] = fig_kwargs.get('figsize', figure_ratio(dataarrays[0], x_coord, y_coord,
fixed_width=width))
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
# Set the tick and axes labels.
xarray_set_axes_labels(dataarrays[0], ax, x_coord, y_coord, x_label_kwargs, y_label_kwargs)
# Title the plot.
title_kwargs.setdefault('label', "Class Extents" if len(dataarrays) == 1 else \
"Class Extents Change (Baseline/Analysis)")
ax.set_title(**title_kwargs)
# Create the legend.
# Colors must be in range [0,1] for color patches.
colors = [np.array(color) / 255 for color in colors]
if len(dataarrays) == 1:
class_legend_label = "a Member of the Class" if class_legend_label is None else class_legend_label
labels = list(map(lambda str: str.format(class_legend_label),
['Never {}', 'Sometimes {}', 'Always {}']))
else:
class_legend_label = "Class Membership" if class_legend_label is None else class_legend_label
labels = list(map(lambda str: str.format(class_legend_label, class_legend_label),
['No {} to No {}', 'No {} to {}', '{} to No {}', '{} to {}']))
color_patches = list(map(lambda color, label: mpatches.Patch(color=color, label=label), colors, labels))
legend_kwargs.setdefault('loc', 'best')
legend_kwargs['handles'] = color_patches
ax.legend(**legend_kwargs)
ax.imshow(color_array, **imshow_kwargs)
if create_stats_table or create_change_matrix:
stats_data = []
if create_stats_table:
num_table_rows = 4 if len(dataarrays) == 1 else 6
index = labels + ['Unknown'] if len(dataarrays) == 1 else \
labels + ['Net Change'] + ['Unknown']
stats_table = pd.DataFrame(data=np.zeros((num_table_rows, 2)),
index=index, columns=['Number', 'Percent'])
# Number
num_insufficient_data = ~masks[0]
for i in range(1, len(masks)):
num_insufficient_data = num_insufficient_data & ~masks[i]
num_insufficient_data = num_insufficient_data.sum()
mask_sums = np.array([mask.sum() for mask in masks])
if len(dataarrays) == 1:
stats_table.iloc[:, 0] = np.concatenate((mask_sums, np.array([num_insufficient_data])))
else:
stats_table.iloc[:, 0] = np.concatenate(
(mask_sums, np.array([mask_sums[[1, 2]].sum()]), np.array([num_insufficient_data])))
# Percent
stats_table.iloc[:, 1] = stats_table.iloc[:, 0] / (y_x_shape[0] * y_x_shape[1])
stats_data.append(stats_table)
if len(dataarrays) == 2 and create_change_matrix:
dims = ['baseline', 'analysis']
classes = ['always', 'sometimes', 'never']
coords = {'baseline': classes, 'analysis': classes}
# Number
num_px_trans_da = xr.DataArray(np.zeros((3, 3), dtype=np.uint64),
dims=dims, coords=coords)
baseline_dict = OrderedDict([('always', baseline_perm_mask),
('sometimes', baseline_chng_mask),
('never', baseline_none_mask)])
analysis_dict = OrderedDict([('always', analysis_perm_mask),
('sometimes', analysis_chng_mask),
('never', analysis_none_mask)])
for baseline_cls, baseline_cls_mask in baseline_dict.items():
num_px_trans_da.sel(dict(baseline=baseline_cls)).values[:] = \
np.array([((baseline_cls_mask == 1) & (analysis_cls_mask == 1)).sum()
for analysis_cls_mask in analysis_dict.values()])
# Percent
percent_px_trans_da = num_px_trans_da / (y_x_shape[0] * y_x_shape[1])
stats_data.append(xr.Dataset(data_vars=dict(Number=num_px_trans_da,
Percent=percent_px_trans_da)))
stats_data = tuple(stats_data)
if create_stats_table or create_change_matrix:
return (fig, ax), stats_data
else:
return (fig, ax) | [
"def",
"binary_class_change_plot",
"(",
"dataarrays",
",",
"clean_masks",
"=",
"None",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"colors",
"=",
"None",
",",
"override_mask",
"=",
"None",
",",
"override_color",
"=",
"None",
",",
"neg_trans",
"=",
"False",
",",
"pos_trans",
"=",
"False",
",",
"class_legend_label",
"=",
"None",
",",
"width",
"=",
"10",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"fig_kwargs",
"=",
"{",
"}",
",",
"title_kwargs",
"=",
"{",
"}",
",",
"imshow_kwargs",
"=",
"{",
"}",
",",
"x_label_kwargs",
"=",
"{",
"}",
",",
"y_label_kwargs",
"=",
"{",
"}",
",",
"legend_kwargs",
"=",
"{",
"}",
",",
"create_stats_table",
"=",
"True",
",",
"create_change_matrix",
"=",
"True",
",",
"denoise",
"=",
"True",
",",
"denoise_params",
"=",
"None",
")",
":",
"if",
"clean_masks",
"is",
"None",
":",
"clean_masks",
"=",
"[",
"xr",
".",
"DataArray",
"(",
"np",
".",
"ones",
"(",
"dataarray",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
",",
"coords",
"=",
"dataarray",
".",
"coords",
",",
"dims",
"=",
"dataarray",
".",
"dims",
")",
"for",
"dataarray",
"in",
"dataarrays",
"]",
"denoise_params",
"=",
"{",
"}",
"if",
"denoise_params",
"is",
"None",
"and",
"denoise",
"else",
"denoise_params",
"# Avoid modifying the original arguments.",
"fig_kwargs",
",",
"title_kwargs",
",",
"legend_kwargs",
"=",
"fig_kwargs",
".",
"copy",
"(",
")",
",",
"title_kwargs",
".",
"copy",
"(",
")",
",",
"legend_kwargs",
".",
"copy",
"(",
")",
"# Handle conversion of matplotlib color names to lists of rgb values (range [0,255] for plt.imshow()).",
"colors",
"=",
"list",
"(",
"map",
"(",
"convert_name_rgba_255",
",",
"colors",
")",
")",
"override_color",
"=",
"convert_name_rgba_255",
"(",
"override_color",
")",
"if",
"override_color",
"is",
"not",
"None",
"else",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
"def",
"get_none_chng_perm_masks",
"(",
"dataarray",
",",
"clean_mask",
",",
"time_dim",
"=",
"'time'",
")",
":",
"\"\"\"\n For a DataArray of binary classifications (0 or 1) with a time dimension,\n get a list of masks indicating where the points are, in order, never, sometimes,\n or always a member of the class (1 indicates membership), considering only\n non-NaN values for those points.\n \"\"\"",
"time_axis",
"=",
"dataarray",
".",
"get_axis_num",
"(",
"time_dim",
")",
"# Get the mean classification across time.",
"masked_da",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"dataarray",
".",
"values",
",",
"mask",
"=",
"~",
"clean_mask",
".",
"values",
")",
"frac_cls",
"=",
"masked_da",
".",
"mean",
"(",
"axis",
"=",
"time_axis",
")",
"# Find where pixels are permanent, changing, or never a member of the class.",
"none_mask",
"=",
"(",
"frac_cls",
"==",
"0",
")",
".",
"filled",
"(",
"False",
")",
"chng_mask",
"=",
"(",
"0",
"<",
"frac_cls",
")",
".",
"filled",
"(",
"False",
")",
"&",
"(",
"frac_cls",
"<",
"1",
")",
".",
"filled",
"(",
"False",
")",
"perm_mask",
"=",
"(",
"1",
"==",
"frac_cls",
")",
".",
"filled",
"(",
"False",
")",
"return",
"[",
"none_mask",
",",
"chng_mask",
",",
"perm_mask",
"]",
"# Assemble the color masks.",
"masks",
"=",
"[",
"]",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
":",
"# Determine extent change in one time period.",
"dataarray",
"=",
"dataarrays",
"[",
"0",
"]",
"clean_mask",
"=",
"clean_masks",
"[",
"0",
"]",
"masks",
"+=",
"get_none_chng_perm_masks",
"(",
"dataarray",
",",
"clean_mask",
")",
"else",
":",
"# Determine change between two time periods.",
"baseline_da",
",",
"analysis_da",
"=",
"dataarrays",
"baseline_clean_mask",
"=",
"clean_masks",
"[",
"0",
"]",
"if",
"clean_masks",
"is",
"not",
"None",
"else",
"None",
"analysis_clean_mask",
"=",
"clean_masks",
"[",
"1",
"]",
"if",
"clean_masks",
"is",
"not",
"None",
"else",
"None",
"baseline_none_mask",
",",
"baseline_chng_mask",
",",
"baseline_perm_mask",
"=",
"get_none_chng_perm_masks",
"(",
"baseline_da",
",",
"baseline_clean_mask",
")",
"analysis_none_mask",
",",
"analysis_chng_mask",
",",
"analysis_perm_mask",
"=",
"get_none_chng_perm_masks",
"(",
"analysis_da",
",",
"analysis_clean_mask",
")",
"# Find where points are never a member of the class or are a member at one or more times.",
"baseline_cls_ever",
"=",
"baseline_chng_mask",
"|",
"baseline_perm_mask",
"analysis_cls_ever",
"=",
"analysis_chng_mask",
"|",
"analysis_perm_mask",
"# Find where points change between never being a member of the class",
"# and being a member at one or more times between the two periods.",
"no_cls_no_cls_mask",
"=",
"baseline_none_mask",
"&",
"analysis_none_mask",
"no_cls_cls_mask",
"=",
"baseline_none_mask",
"&",
"analysis_cls_ever",
"cls_no_cls_mask",
"=",
"baseline_cls_ever",
"&",
"analysis_none_mask",
"cls_cls_mask",
"=",
"baseline_cls_ever",
"&",
"analysis_cls_ever",
"masks",
"+=",
"[",
"no_cls_no_cls_mask",
",",
"no_cls_cls_mask",
",",
"cls_no_cls_mask",
",",
"cls_cls_mask",
"]",
"# Determine the overriding mask.",
"y_x_shape",
"=",
"len",
"(",
"dataarrays",
"[",
"0",
"]",
"[",
"y_coord",
"]",
")",
",",
"len",
"(",
"dataarrays",
"[",
"0",
"]",
"[",
"x_coord",
"]",
")",
"override_mask",
"=",
"np",
".",
"zeros",
"(",
"y_x_shape",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"if",
"override_mask",
"is",
"None",
"else",
"override_mask",
"# Create an array of integer-encoded change-class values.",
"cls_cng_arr",
"=",
"np",
".",
"zeros",
"(",
"y_x_shape",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"for",
"i",
",",
"mask",
"in",
"enumerate",
"(",
"masks",
")",
":",
"cls_cng_arr",
"[",
"mask",
"]",
"=",
"i",
"# Denoise the class change image (optional).",
"if",
"denoise",
":",
"cls_cng_arr",
"=",
"lone_object_filter",
"(",
"cls_cng_arr",
",",
"*",
"*",
"denoise_params",
")",
"# Color the image with the masks.",
"# Initialize pixels as white.",
"transparency_mask",
"=",
"np",
".",
"zeros",
"(",
"y_x_shape",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"color_array",
"=",
"np",
".",
"full",
"(",
"(",
"*",
"y_x_shape",
",",
"4",
")",
",",
"255",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"masks",
")",
")",
":",
"if",
"(",
"neg_trans",
"and",
"i",
"==",
"0",
")",
"or",
"(",
"pos_trans",
"and",
"i",
"==",
"len",
"(",
"masks",
")",
"-",
"1",
")",
":",
"transparency_mask",
"[",
"cls_cng_arr",
"==",
"i",
"]",
"=",
"True",
"color_array",
"[",
"cls_cng_arr",
"==",
"i",
"]",
"=",
"colors",
"[",
"i",
"]",
"if",
"neg_trans",
"or",
"pos_trans",
":",
"color_array",
"[",
"transparency_mask",
"]",
"=",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
"color_array",
"[",
"override_mask",
"]",
"=",
"override_color",
"fig_kwargs",
"[",
"'figsize'",
"]",
"=",
"fig_kwargs",
".",
"get",
"(",
"'figsize'",
",",
"figure_ratio",
"(",
"dataarrays",
"[",
"0",
"]",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"width",
")",
")",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_kwargs",
")",
"# Set the tick and axes labels.",
"xarray_set_axes_labels",
"(",
"dataarrays",
"[",
"0",
"]",
",",
"ax",
",",
"x_coord",
",",
"y_coord",
",",
"x_label_kwargs",
",",
"y_label_kwargs",
")",
"# Title the plot.",
"title_kwargs",
".",
"setdefault",
"(",
"'label'",
",",
"\"Class Extents\"",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
"else",
"\"Class Extents Change (Baseline/Analysis)\"",
")",
"ax",
".",
"set_title",
"(",
"*",
"*",
"title_kwargs",
")",
"# Create the legend.",
"# Colors must be in range [0,1] for color patches.",
"colors",
"=",
"[",
"np",
".",
"array",
"(",
"color",
")",
"/",
"255",
"for",
"color",
"in",
"colors",
"]",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
":",
"class_legend_label",
"=",
"\"a Member of the Class\"",
"if",
"class_legend_label",
"is",
"None",
"else",
"class_legend_label",
"labels",
"=",
"list",
"(",
"map",
"(",
"lambda",
"str",
":",
"str",
".",
"format",
"(",
"class_legend_label",
")",
",",
"[",
"'Never {}'",
",",
"'Sometimes {}'",
",",
"'Always {}'",
"]",
")",
")",
"else",
":",
"class_legend_label",
"=",
"\"Class Membership\"",
"if",
"class_legend_label",
"is",
"None",
"else",
"class_legend_label",
"labels",
"=",
"list",
"(",
"map",
"(",
"lambda",
"str",
":",
"str",
".",
"format",
"(",
"class_legend_label",
",",
"class_legend_label",
")",
",",
"[",
"'No {} to No {}'",
",",
"'No {} to {}'",
",",
"'{} to No {}'",
",",
"'{} to {}'",
"]",
")",
")",
"color_patches",
"=",
"list",
"(",
"map",
"(",
"lambda",
"color",
",",
"label",
":",
"mpatches",
".",
"Patch",
"(",
"color",
"=",
"color",
",",
"label",
"=",
"label",
")",
",",
"colors",
",",
"labels",
")",
")",
"legend_kwargs",
".",
"setdefault",
"(",
"'loc'",
",",
"'best'",
")",
"legend_kwargs",
"[",
"'handles'",
"]",
"=",
"color_patches",
"ax",
".",
"legend",
"(",
"*",
"*",
"legend_kwargs",
")",
"ax",
".",
"imshow",
"(",
"color_array",
",",
"*",
"*",
"imshow_kwargs",
")",
"if",
"create_stats_table",
"or",
"create_change_matrix",
":",
"stats_data",
"=",
"[",
"]",
"if",
"create_stats_table",
":",
"num_table_rows",
"=",
"4",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
"else",
"6",
"index",
"=",
"labels",
"+",
"[",
"'Unknown'",
"]",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
"else",
"labels",
"+",
"[",
"'Net Change'",
"]",
"+",
"[",
"'Unknown'",
"]",
"stats_table",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_table_rows",
",",
"2",
")",
")",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"[",
"'Number'",
",",
"'Percent'",
"]",
")",
"# Number",
"num_insufficient_data",
"=",
"~",
"masks",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"masks",
")",
")",
":",
"num_insufficient_data",
"=",
"num_insufficient_data",
"&",
"~",
"masks",
"[",
"i",
"]",
"num_insufficient_data",
"=",
"num_insufficient_data",
".",
"sum",
"(",
")",
"mask_sums",
"=",
"np",
".",
"array",
"(",
"[",
"mask",
".",
"sum",
"(",
")",
"for",
"mask",
"in",
"masks",
"]",
")",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
":",
"stats_table",
".",
"iloc",
"[",
":",
",",
"0",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"mask_sums",
",",
"np",
".",
"array",
"(",
"[",
"num_insufficient_data",
"]",
")",
")",
")",
"else",
":",
"stats_table",
".",
"iloc",
"[",
":",
",",
"0",
"]",
"=",
"np",
".",
"concatenate",
"(",
"(",
"mask_sums",
",",
"np",
".",
"array",
"(",
"[",
"mask_sums",
"[",
"[",
"1",
",",
"2",
"]",
"]",
".",
"sum",
"(",
")",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"num_insufficient_data",
"]",
")",
")",
")",
"# Percent",
"stats_table",
".",
"iloc",
"[",
":",
",",
"1",
"]",
"=",
"stats_table",
".",
"iloc",
"[",
":",
",",
"0",
"]",
"/",
"(",
"y_x_shape",
"[",
"0",
"]",
"*",
"y_x_shape",
"[",
"1",
"]",
")",
"stats_data",
".",
"append",
"(",
"stats_table",
")",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"2",
"and",
"create_change_matrix",
":",
"dims",
"=",
"[",
"'baseline'",
",",
"'analysis'",
"]",
"classes",
"=",
"[",
"'always'",
",",
"'sometimes'",
",",
"'never'",
"]",
"coords",
"=",
"{",
"'baseline'",
":",
"classes",
",",
"'analysis'",
":",
"classes",
"}",
"# Number",
"num_px_trans_da",
"=",
"xr",
".",
"DataArray",
"(",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
",",
"dtype",
"=",
"np",
".",
"uint64",
")",
",",
"dims",
"=",
"dims",
",",
"coords",
"=",
"coords",
")",
"baseline_dict",
"=",
"OrderedDict",
"(",
"[",
"(",
"'always'",
",",
"baseline_perm_mask",
")",
",",
"(",
"'sometimes'",
",",
"baseline_chng_mask",
")",
",",
"(",
"'never'",
",",
"baseline_none_mask",
")",
"]",
")",
"analysis_dict",
"=",
"OrderedDict",
"(",
"[",
"(",
"'always'",
",",
"analysis_perm_mask",
")",
",",
"(",
"'sometimes'",
",",
"analysis_chng_mask",
")",
",",
"(",
"'never'",
",",
"analysis_none_mask",
")",
"]",
")",
"for",
"baseline_cls",
",",
"baseline_cls_mask",
"in",
"baseline_dict",
".",
"items",
"(",
")",
":",
"num_px_trans_da",
".",
"sel",
"(",
"dict",
"(",
"baseline",
"=",
"baseline_cls",
")",
")",
".",
"values",
"[",
":",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"(",
"baseline_cls_mask",
"==",
"1",
")",
"&",
"(",
"analysis_cls_mask",
"==",
"1",
")",
")",
".",
"sum",
"(",
")",
"for",
"analysis_cls_mask",
"in",
"analysis_dict",
".",
"values",
"(",
")",
"]",
")",
"# Percent",
"percent_px_trans_da",
"=",
"num_px_trans_da",
"/",
"(",
"y_x_shape",
"[",
"0",
"]",
"*",
"y_x_shape",
"[",
"1",
"]",
")",
"stats_data",
".",
"append",
"(",
"xr",
".",
"Dataset",
"(",
"data_vars",
"=",
"dict",
"(",
"Number",
"=",
"num_px_trans_da",
",",
"Percent",
"=",
"percent_px_trans_da",
")",
")",
")",
"stats_data",
"=",
"tuple",
"(",
"stats_data",
")",
"if",
"create_stats_table",
"or",
"create_change_matrix",
":",
"return",
"(",
"fig",
",",
"ax",
")",
",",
"stats_data",
"else",
":",
"return",
"(",
"fig",
",",
"ax",
")"
] | [
1241,
0
] | [
1526,
24
] | python | en | ['en', 'error', 'th'] | False |
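A hedged usage sketch of binary_class_change_plot follows. The import path is not shown in this record, so the sketch takes the plotting function as an argument; the toy DataArrays, the color choices, and the helper name example_change_plot are illustrative assumptions, and the sketch presumes an environment in which the module itself runs (the function relies on the deprecated np.bool alias internally).

import numpy as np
import xarray as xr

def example_change_plot(binary_class_change_plot):
    # Two small binary classification stacks with (time, latitude, longitude) dims.
    dims = ('time', 'latitude', 'longitude')
    coords = {'time': np.arange(3),
              'latitude': np.linspace(0.0, 0.03, 4),
              'longitude': np.linspace(35.0, 35.03, 4)}
    rng = np.random.default_rng(0)
    baseline = xr.DataArray(rng.integers(0, 2, (3, 4, 4)), dims=dims, coords=coords)
    analysis = xr.DataArray(rng.integers(0, 2, (3, 4, 4)), dims=dims, coords=coords)
    clean = xr.ones_like(baseline, dtype=bool)  # treat every observation as clean
    # Four colors map to the (never,never), (never,some), (some,never), (some,some) transitions.
    (fig, ax), (stats_table, change_matrix) = binary_class_change_plot(
        [baseline, analysis], clean_masks=[clean, clean],
        colors=['black', 'blue', 'red', 'white'],
        class_legend_label='Water', denoise=False)
    return stats_table, change_matrix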
intersection_threshold_plot | (first, second, th, mask=None,
color_none='black', color_first='green',
color_second='red', color_both='white',
color_mask='gray',
width=10, fig=None, ax=None,
x_coord='longitude', y_coord='latitude',
*args, **kwargs) |
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold.
Parameters
----------
first, second: xarray.DataArray
The DataArrays to compare.
th: tuple
A 2-tuple of the minimum (inclusive) and maximum (exclusive) threshold values, respectively.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays. The pixels for which it is `True`
are colored `color_mask`.
color_none: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
neither first nor second have values within the threshold.
Default color is black.
color_first: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the first has values within the threshold.
Default color is green.
color_second: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the second has values within the threshold.
Default color is red.
color_both: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
both the first and second have values within the threshold.
Default color is white.
color_mask: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where `mask == True`.
Overrides any other color a region may have.
Default color is gray.
width: int
The width of the created ``matplotlib.figure.Figure``.
The height will be set to maintain aspect ratio.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
*args: list
Arguments passed to ``matplotlib.pyplot.imshow()``.
**kwargs: dict
Keyword arguments passed to ``matplotlib.pyplot.imshow()``.
|
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold. | def intersection_threshold_plot(first, second, th, mask=None,
color_none='black', color_first='green',
color_second='red', color_both='white',
color_mask='gray',
width=10, fig=None, ax=None,
x_coord='longitude', y_coord='latitude',
*args, **kwargs):
"""
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold.
Parameters
----------
first, second: xarray.DataArray
The DataArrays to compare.
th: tuple
A 2-tuple of the minimum (inclusive) and maximum (exclusive) threshold values, respectively.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays. The pixels for which it is `True`
are colored `color_mask`.
color_none: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
neither first nor second have values within the threshold.
Default color is black.
color_first: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the first has values within the threshold.
Default color is green.
color_second: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the second has values within the threshold.
Default color is red.
color_both: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
both the first and second have values within the threshold.
Default color is white.
color_mask: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where `mask == True`.
Overrides any other color a region may have.
Default color is gray.
width: int
The width of the created ``matplotlib.figure.Figure``.
The height will be set to maintain aspect ratio.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
*args: list
Arguments passed to ``matplotlib.pyplot.imshow()``.
**kwargs: dict
Keyword arguments passed to ``matplotlib.pyplot.imshow()``.
"""
# Handle conversion of matplotlib color names to lists of rgb values.
color_none, color_first, color_second, color_both, color_mask = \
list(map(convert_name_rgb_255, [color_none, color_first, color_second, color_both, color_mask]))
# Determine the regions.
first_in = np.logical_and(th[0] <= first, first < th[1])
second_in = np.logical_and(th[0] <= second, second < th[1])
both_in = np.logical_and(first_in, second_in)
none_in = np.invert(both_in)
# Determine the overriding mask.
mask = np.zeros(first.shape).astype(bool) if mask is None else mask
# The colors for each pixel.
color_array = np.zeros((*first.shape, 3)).astype(np.int16)
color_array[none_in] = color_none
color_array[first_in] = color_first
color_array[second_in] = color_second
color_array[both_in] = color_both
color_array[mask] = color_mask
fig, ax = retrieve_or_create_fig_ax(fig, ax, figsize=figure_ratio(first, x_coord, y_coord, fixed_width=width))
plt.title("Threshold: {} < x < {}".format(th[0], th[1]))
max_num_ticks = 10 # Max ticks per axis.
lon = first.longitude.values
label_every = int(round(len(lon) / max_num_ticks))
lon_labels = ["{0:.4f}".format(lon_val) for lon_val in lon[::label_every]]
plt.xlabel('Longitude')
plt.xticks(range(len(lon))[::label_every], lon_labels, rotation='vertical')
lat = first.latitude.values
label_every = int(round(len(lat) / max_num_ticks))
lat_labels = ["{0:.4f}".format(lat_val) for lat_val in lat[::label_every]]
plt.ylabel('Latitude')
plt.yticks(range(len(lat))[::label_every], lat_labels)
plt.imshow(color_array, *args, **kwargs)
return fig, ax | [
"def",
"intersection_threshold_plot",
"(",
"first",
",",
"second",
",",
"th",
",",
"mask",
"=",
"None",
",",
"color_none",
"=",
"'black'",
",",
"color_first",
"=",
"'green'",
",",
"color_second",
"=",
"'red'",
",",
"color_both",
"=",
"'white'",
",",
"color_mask",
"=",
"'gray'",
",",
"width",
"=",
"10",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Handle conversion of matplotlib color names to lists of rgb values.",
"color_none",
",",
"color_first",
",",
"color_second",
",",
"color_both",
",",
"color_mask",
"=",
"list",
"(",
"map",
"(",
"convert_name_rgb_255",
",",
"[",
"color_none",
",",
"color_first",
",",
"color_second",
",",
"color_both",
",",
"color_mask",
"]",
")",
")",
"# Determine the regions.",
"first_in",
"=",
"np",
".",
"logical_and",
"(",
"th",
"[",
"0",
"]",
"<=",
"first",
",",
"first",
"<",
"th",
"[",
"1",
"]",
")",
"second_in",
"=",
"np",
".",
"logical_and",
"(",
"th",
"[",
"0",
"]",
"<=",
"second",
",",
"second",
"<",
"th",
"[",
"1",
"]",
")",
"both_in",
"=",
"np",
".",
"logical_and",
"(",
"first_in",
",",
"second_in",
")",
"none_in",
"=",
"np",
".",
"invert",
"(",
"both_in",
")",
"# Determine the overriding mask.",
"mask",
"=",
"np",
".",
"zeros",
"(",
"first",
".",
"shape",
")",
".",
"astype",
"(",
"bool",
")",
"if",
"mask",
"is",
"None",
"else",
"mask",
"# The colors for each pixel.",
"color_array",
"=",
"np",
".",
"zeros",
"(",
"(",
"*",
"first",
".",
"shape",
",",
"3",
")",
")",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"color_array",
"[",
"none_in",
"]",
"=",
"color_none",
"color_array",
"[",
"first_in",
"]",
"=",
"color_first",
"color_array",
"[",
"second_in",
"]",
"=",
"color_second",
"color_array",
"[",
"both_in",
"]",
"=",
"color_both",
"color_array",
"[",
"mask",
"]",
"=",
"color_mask",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"figsize",
"=",
"figure_ratio",
"(",
"first",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"width",
")",
")",
"plt",
".",
"title",
"(",
"\"Threshold: {} < x < {}\"",
".",
"format",
"(",
"th",
"[",
"0",
"]",
",",
"th",
"[",
"1",
"]",
")",
")",
"max_num_ticks",
"=",
"10",
"# Max ticks per axis.",
"lon",
"=",
"first",
".",
"longitude",
".",
"values",
"label_every",
"=",
"int",
"(",
"round",
"(",
"len",
"(",
"lon",
")",
"/",
"max_num_ticks",
")",
")",
"lon_labels",
"=",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"lon_val",
")",
"for",
"lon_val",
"in",
"lon",
"[",
":",
":",
"label_every",
"]",
"]",
"plt",
".",
"xlabel",
"(",
"'Longitude'",
")",
"plt",
".",
"xticks",
"(",
"range",
"(",
"len",
"(",
"lon",
")",
")",
"[",
":",
":",
"label_every",
"]",
",",
"lon_labels",
",",
"rotation",
"=",
"'vertical'",
")",
"lat",
"=",
"first",
".",
"latitude",
".",
"values",
"label_every",
"=",
"int",
"(",
"round",
"(",
"len",
"(",
"lat",
")",
"/",
"max_num_ticks",
")",
")",
"lat_labels",
"=",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"lat_val",
")",
"for",
"lat_val",
"in",
"lat",
"[",
":",
":",
"label_every",
"]",
"]",
"plt",
".",
"ylabel",
"(",
"'Latitude'",
")",
"plt",
".",
"yticks",
"(",
"range",
"(",
"len",
"(",
"lat",
")",
")",
"[",
":",
":",
"label_every",
"]",
",",
"lat_labels",
")",
"plt",
".",
"imshow",
"(",
"color_array",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"fig",
",",
"ax"
] | [
1531,
0
] | [
1629,
18
] | python | en | ['en', 'error', 'th'] | False |
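A hedged usage sketch of intersection_threshold_plot follows. As above, the import path is not given in this record, so the plotting function is passed in as an argument; the random index-like DataArrays and the threshold values are illustrative assumptions.

import numpy as np
import xarray as xr

def example_threshold_plot(intersection_threshold_plot):
    coords = {'latitude': np.linspace(0.0, 0.05, 50),
              'longitude': np.linspace(35.0, 35.05, 50)}
    rng = np.random.default_rng(1)
    first = xr.DataArray(rng.uniform(-1.0, 1.0, (50, 50)),
                         dims=('latitude', 'longitude'), coords=coords)
    second = xr.DataArray(rng.uniform(-1.0, 1.0, (50, 50)),
                          dims=('latitude', 'longitude'), coords=coords)
    # Green: only `first` in [0.1, 1.0); red: only `second`; white: both; black: neither.
    fig, ax = intersection_threshold_plot(first, second, th=(0.1, 1.0))
    return fig, ax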
print_matrix | (cell_value_mtx, cell_label_mtx=None, row_labels=None, col_labels=None,
show_row_labels=True, show_col_labels=True, show_cell_labels=True,
cmap=None, cell_val_fmt='2g', annot_kwargs={}, tick_fontsize=14,
x_axis_tick_kwargs=None, y_axis_tick_kwargs=None,
x_axis_ticks_position='default', y_axis_ticks_position='default',
fig=None, ax=None, heatmap_kwargs={}, fig_kwargs={}) |
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823.
Arguments
---------
cell_value_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell values when coloring with the colormap.
cell_label_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell labels.
row_labels, col_labels: list-like or xarray.DataArray
Lists of labels in the order they index the matrix rows and columns, respectively.
show_row_labels, show_col_labels: bool
Whether to show the row or column labels, respectively.
show_cell_labels: bool
Whether to show values as cell labels or not.
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color the cells based on `cell_value_mtx`.
cell_val_fmt: str
Formatting string for values in the matrix cells.
annot_kwargs: dict
Keyword arguments for ``ax.text`` for formatting cell annotation text.
tick_fontsize: int
The fontsize of tick labels. Overridden by `x_axis_tick_kwargs` and `y_axis_tick_kwargs`.
x_axis_tick_kwargs, y_axis_tick_kwargs: dict
Keyword arguments for x and y axis tick labels, respectively.
Specifically, keyword arguments for calls to `ax.[x_axis,y_axis].set_ticklabels()`
where `ax` is the `matplotlib.axes.Axes` object returned by `seaborn.heatmap()`.
x_axis_ticks_position, y_axis_ticks_position: str
The position of x and y axis ticks, respectively.
For x_axis_ticks_position, possible values are ['top', 'bottom', 'both', 'default', 'none'].
For y_axis_ticks_position, possible values are ['left', 'right', 'both', 'default', 'none'].
See https://matplotlib.org/api/axis_api.html for more information.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
heatmap_kwargs: dict
Dictionary of keyword arguments to `seaborn.heatmap()`.
Overrides any other relevant parameters passed to this function.
Some notable parameters include 'vmin', 'vmax', 'cbar', and 'cbar_kws'.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and axes used for the plot.
|
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823. | def print_matrix(cell_value_mtx, cell_label_mtx=None, row_labels=None, col_labels=None,
show_row_labels=True, show_col_labels=True, show_cell_labels=True,
cmap=None, cell_val_fmt='2g', annot_kwargs={}, tick_fontsize=14,
x_axis_tick_kwargs=None, y_axis_tick_kwargs=None,
x_axis_ticks_position='default', y_axis_ticks_position='default',
fig=None, ax=None, heatmap_kwargs={}, fig_kwargs={}):
"""
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823.
Arguments
---------
cell_value_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell values when coloring with the colormap.
cell_label_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell labels.
row_labels, col_labels: list-like or xarray.DataArray
Lists of labels in the order they index the matrix rows and columns, respectively.
show_row_labels, show_col_labels: bool
Whether to show the row or column labels, respectively.
show_cell_labels: bool
Whether to show values as cell labels or not.
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color the cells based on `cell_value_mtx`.
cell_val_fmt: str
Formatting string for values in the matrix cells.
annot_kwargs: dict
Keyword arguments for ``ax.text`` for formatting cell annotation text.
tick_fontsize: int
The fontsize of tick labels. Overridden by `x_axis_tick_kwargs` and `y_axis_tick_kwargs`.
x_axis_tick_kwargs, y_axis_tick_kwargs: dict
Keyword arguments for x and y axis tick labels, respectively.
Specifically, keyword arguments for calls to `ax.[x_axis,y_axis].set_ticklabels()`
where `ax` is the `matplotlib.axes.Axes` object returned by `seaborn.heatmap()`.
x_axis_ticks_position, y_axis_ticks_position: str
The position of x and y axis ticks, respectively.
For x_axis_ticks_position, possible values are ['top', 'bottom', 'both', 'default', 'none'].
For y_axis_ticks_position, possible values are ['left', 'right', 'both', 'default', 'none'].
See https://matplotlib.org/api/axis_api.html for more information.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
heatmap_kwargs: dict
Dictionary of keyword arguments to `seaborn.heatmap()`.
Overrides any other relevant parameters passed to this function.
Some notable parameters include 'vmin', 'vmax', 'cbar', and 'cbar_kws'.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and axes used for the plot.
"""
cell_label_mtx = cell_value_mtx if cell_label_mtx is None else cell_label_mtx
row_labels = [''] * cell_value_mtx.shape[0] if not show_row_labels \
or row_labels is None else row_labels
col_labels = [''] * cell_value_mtx.shape[1] if not show_col_labels \
or col_labels is None else col_labels
heatmap_kwargs.setdefault('cbar', False)
df = pd.DataFrame(cell_value_mtx, index=row_labels, columns=col_labels)
cell_labels = cell_label_mtx if show_cell_labels else None
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
heatmap = sns.heatmap(df, cmap=cmap, annot=cell_labels, fmt=cell_val_fmt,
annot_kws=annot_kwargs, ax=ax, **heatmap_kwargs)
if not show_row_labels:
heatmap.set_yticks([]) # Ticks must be hidden explicitly.
else:
if y_axis_tick_kwargs is None:
y_axis_tick_kwargs = dict(rotation=0, ha='right')
y_axis_tick_kwargs.setdefault('fontsize', tick_fontsize)
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), **y_axis_tick_kwargs)
heatmap.yaxis.set_ticks_position(y_axis_ticks_position)
heatmap.yaxis.tick_left() # Ticks will also appear on the right side otherwise.
if not show_col_labels:
heatmap.set_xticks([])
else:
if x_axis_tick_kwargs is None:
x_axis_tick_kwargs = dict(rotation=45, ha='right')
x_axis_tick_kwargs.setdefault('fontsize', tick_fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), **x_axis_tick_kwargs)
heatmap.xaxis.set_ticks_position(x_axis_ticks_position)
heatmap.xaxis.tick_bottom() # Ticks will also appear on the top side otherwise.
return fig, ax | [
"def",
"print_matrix",
"(",
"cell_value_mtx",
",",
"cell_label_mtx",
"=",
"None",
",",
"row_labels",
"=",
"None",
",",
"col_labels",
"=",
"None",
",",
"show_row_labels",
"=",
"True",
",",
"show_col_labels",
"=",
"True",
",",
"show_cell_labels",
"=",
"True",
",",
"cmap",
"=",
"None",
",",
"cell_val_fmt",
"=",
"'2g'",
",",
"annot_kwargs",
"=",
"{",
"}",
",",
"tick_fontsize",
"=",
"14",
",",
"x_axis_tick_kwargs",
"=",
"None",
",",
"y_axis_tick_kwargs",
"=",
"None",
",",
"x_axis_ticks_position",
"=",
"'default'",
",",
"y_axis_ticks_position",
"=",
"'default'",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"heatmap_kwargs",
"=",
"{",
"}",
",",
"fig_kwargs",
"=",
"{",
"}",
")",
":",
"cell_label_mtx",
"=",
"cell_value_mtx",
"if",
"cell_label_mtx",
"is",
"None",
"else",
"cell_label_mtx",
"row_labels",
"=",
"[",
"''",
"]",
"*",
"cell_value_mtx",
".",
"shape",
"[",
"0",
"]",
"if",
"not",
"show_row_labels",
"or",
"row_labels",
"is",
"None",
"else",
"row_labels",
"col_labels",
"=",
"[",
"''",
"]",
"*",
"cell_value_mtx",
".",
"shape",
"[",
"1",
"]",
"if",
"not",
"show_col_labels",
"or",
"col_labels",
"is",
"None",
"else",
"col_labels",
"heatmap_kwargs",
".",
"setdefault",
"(",
"'cbar'",
",",
"False",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"cell_value_mtx",
",",
"index",
"=",
"row_labels",
",",
"columns",
"=",
"col_labels",
")",
"cell_labels",
"=",
"cell_label_mtx",
"if",
"show_cell_labels",
"else",
"None",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_kwargs",
")",
"heatmap",
"=",
"sns",
".",
"heatmap",
"(",
"df",
",",
"cmap",
"=",
"cmap",
",",
"annot",
"=",
"cell_labels",
",",
"fmt",
"=",
"cell_val_fmt",
",",
"annot_kws",
"=",
"annot_kwargs",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"heatmap_kwargs",
")",
"if",
"not",
"show_row_labels",
":",
"heatmap",
".",
"set_yticks",
"(",
"[",
"]",
")",
"# Ticks must be hidden explicitly.",
"else",
":",
"if",
"y_axis_tick_kwargs",
"is",
"None",
":",
"y_axis_tick_kwargs",
"=",
"dict",
"(",
"rotation",
"=",
"0",
",",
"ha",
"=",
"'right'",
")",
"y_axis_tick_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"tick_fontsize",
")",
"heatmap",
".",
"yaxis",
".",
"set_ticklabels",
"(",
"heatmap",
".",
"yaxis",
".",
"get_ticklabels",
"(",
")",
",",
"*",
"*",
"y_axis_tick_kwargs",
")",
"heatmap",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"y_axis_ticks_position",
")",
"heatmap",
".",
"yaxis",
".",
"tick_left",
"(",
")",
"# Ticks will also appear on the right side otherwise.",
"if",
"not",
"show_col_labels",
":",
"heatmap",
".",
"set_xticks",
"(",
"[",
"]",
")",
"else",
":",
"if",
"x_axis_tick_kwargs",
"is",
"None",
":",
"x_axis_tick_kwargs",
"=",
"dict",
"(",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
")",
"x_axis_tick_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"tick_fontsize",
")",
"heatmap",
".",
"xaxis",
".",
"set_ticklabels",
"(",
"heatmap",
".",
"xaxis",
".",
"get_ticklabels",
"(",
")",
",",
"*",
"*",
"x_axis_tick_kwargs",
")",
"heatmap",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"x_axis_ticks_position",
")",
"heatmap",
".",
"xaxis",
".",
"tick_bottom",
"(",
")",
"# Ticks will also appear on the top side otherwise.",
"return",
"fig",
",",
"ax"
] | [
1638,
0
] | [
1724,
18
] | python | en | ['en', 'error', 'th'] | False |
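A hedged usage sketch of print_matrix follows, rendering a small 3x3 count matrix (for example, the change matrix produced by binary_class_change_plot) as an annotated heatmap. The counts, class names, and helper name example_print_matrix are illustrative assumptions.

import numpy as np
import matplotlib.pyplot as plt

def example_print_matrix(print_matrix):
    counts = np.array([[50,  3,  1],
                       [ 4, 40,  6],
                       [ 0,  5, 45]])
    classes = ['never', 'sometimes', 'always']
    # 'd' formats the integer cell labels; the colormap colors cells by count.
    fig, ax = print_matrix(counts, row_labels=classes, col_labels=classes,
                           cmap=plt.get_cmap('Blues'), cell_val_fmt='d')
    return fig, ax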
get_ax_size | (fig, ax) |
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
|
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
| def get_ax_size(fig, ax):
"""
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
"""
# Credit goes to https://stackoverflow.com/a/19306776/5449970.
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
return [bbox.width, bbox.height] | [
"def",
"get_ax_size",
"(",
"fig",
",",
"ax",
")",
":",
"# Credit goes to https://stackoverflow.com/a/19306776/5449970.",
"bbox",
"=",
"ax",
".",
"get_window_extent",
"(",
")",
".",
"transformed",
"(",
"fig",
".",
"dpi_scale_trans",
".",
"inverted",
"(",
")",
")",
"return",
"[",
"bbox",
".",
"width",
",",
"bbox",
".",
"height",
"]"
] | [
1727,
0
] | [
1734,
36
] | python | en | ['en', 'error', 'th'] | False |
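A minimal sketch of how get_ax_size might be used, for instance to scale font sizes with the axes rather than the figure; the helper name example_get_ax_size and the figure size are illustrative assumptions.

import matplotlib.pyplot as plt

def example_get_ax_size(get_ax_size):
    fig, ax = plt.subplots(figsize=(8, 5))
    # Width and height of the drawable Axes area, in inches.
    width_in, height_in = get_ax_size(fig, ax)
    ax.set_title('Axes size: {:.2f} x {:.2f} inches'.format(width_in, height_in))
    return width_in, height_in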
xarray_imshow | (data, x_coord='longitude', y_coord='latitude', width=10,
fig=None, ax=None, use_colorbar=True, cbar_labels=None,
use_legend=False, legend_labels=None, fig_kwargs=None,
imshow_kwargs=None, x_label_kwargs=None, y_label_kwargs=None,
cbar_kwargs=None, nan_color='white', legend_kwargs=None,
ax_tick_label_kwargs=None, x_tick_label_kwargs=None,
y_tick_label_kwargs=None, title=None, title_kwargs=None,
possible_plot_values=None) |
Shows a heatmap of an xarray DataArray with only latitude and longitude dimensions.
Unlike matplotlib `imshow()` or `data.plot.imshow()`, this sets axes ticks and labels.
It also simplifies creating a colorbar and legend.
Parameters
----------
data: xarray.DataArray
The xarray.DataArray containing only latitude and longitude coordinates.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
use_colorbar: bool
Whether or not to create a colorbar to the right of the axes.
cbar_labels: list
A list of strings to label the colorbar.
use_legend: bool
Whether or not to create a legend showing labels for unique values.
Only use if you are sure you have a low number of unique values.
legend_labels: dict
A mapping of values to legend labels.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `plt.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
cbar_kwargs: dict
The dictionary of keyword arguments passed to `plt.colorbar()`.
Some parameters of note include 'ticks', which is a list of values to place ticks at.
nan_color: str or list-like
The color used for NaN regions. Can be a string name of a matplotlib color or
a 3-tuple (list-like) of rgb values in range [0,255].
legend_kwargs: dict
The dictionary of keyword arguments passed to `plt.legend()`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
title: str
The title of the figure.
title_kwargs: dict
The dictionary of keyword arguments passed to `ax.set_title()`.
possible_plot_values: list-like
The possible range of values for `data`. This affects the coloring of the map and the legend entries.
Returns
-------
fig, ax, im, cbar: matplotlib.figure.Figure, matplotlib.axes.Axes,
matplotlib.image.AxesImage, matplotlib.colorbar.Colorbar
The figure and axes used as well as the image returned by `pyplot.imshow()` and the colorbar.
If `use_colorbar == False`, `cbar` will be `None`.
:Authors:
John Rattz ([email protected])
|
Shows a heatmap of an xarray DataArray with only latitude and longitude dimensions.
Unlike matplotlib `imshow()` or `data.plot.imshow()`, this sets axes ticks and labels.
It also simplifies creating a colorbar and legend. | def xarray_imshow(data, x_coord='longitude', y_coord='latitude', width=10,
fig=None, ax=None, use_colorbar=True, cbar_labels=None,
use_legend=False, legend_labels=None, fig_kwargs=None,
imshow_kwargs=None, x_label_kwargs=None, y_label_kwargs=None,
cbar_kwargs=None, nan_color='white', legend_kwargs=None,
ax_tick_label_kwargs=None, x_tick_label_kwargs=None,
y_tick_label_kwargs=None, title=None, title_kwargs=None,
possible_plot_values=None):
"""
Shows a heatmap of an xarray DataArray with only latitude and longitude dimensions.
Unlike matplotlib `imshow()` or `data.plot.imshow()`, this sets axes ticks and labels.
It also simplifies creating a colorbar and legend.
Parameters
----------
data: xarray.DataArray
The xarray.DataArray containing only latitude and longitude coordinates.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
use_colorbar: bool
Whether or not to create a colorbar to the right of the axes.
cbar_labels: list
A list of strings to label the colorbar.
use_legend: bool
Whether or not to create a legend showing labels for unique values.
Only use if you are sure you have a low number of unique values.
legend_labels: dict
A mapping of values to legend labels.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `plt.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
cbar_kwargs: dict
The dictionary of keyword arguments passed to `plt.colorbar()`.
Some parameters of note include 'ticks', which is a list of values to place ticks at.
nan_color: str or list-like
The color used for NaN regions. Can be a string name of a matplotlib color or
a 3-tuple (list-like) of rgb values in range [0,255].
legend_kwargs: dict
The dictionary of keyword arguments passed to `plt.legend()`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
title: str
The title of the figure.
title_kwargs: dict
The dictionary of keyword arguments passed to `ax.set_title()`.
possible_plot_values: list-like
The possible range of values for `data`. This affects the coloring of the map and the legend entries.
Returns
-------
fig, ax, im, cbar: matplotlib.figure.Figure, matplotlib.axes.Axes,
matplotlib.image.AxesImage, matplotlib.colorbar.Colorbar
The figure and axes used as well as the image returned by `pyplot.imshow()` and the colorbar.
If `use_colorbar == False`, `cbar` will be `None`.
:Authors:
John Rattz ([email protected])
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Figure kwargs
# Use `copy()` to avoid modifying the original dictionaries.
fig_kwargs = {} if fig_kwargs is None else fig_kwargs.copy()
figsize = \
fig_kwargs.setdefault('figsize', figure_ratio(data, x_coord, y_coord,
fixed_width=width))
# Imshow kwargs
imshow_kwargs = {} if imshow_kwargs is None else imshow_kwargs.copy()
imshow_kwargs.setdefault('interpolation', 'nearest')
nan_color = norm_color(nan_color) # Normalize color value for matplotlib.
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
axsize = get_ax_size(fig, ax) # Scale fonts on axis size, not figure size.
# Axis label kwargs
x_label_kwargs = {} if x_label_kwargs is None else x_label_kwargs.copy()
y_label_kwargs = {} if y_label_kwargs is None else y_label_kwargs.copy()
# Axis tick label kwargs
ax_tick_label_kwargs = {} if ax_tick_label_kwargs is None else \
ax_tick_label_kwargs.copy()
x_tick_label_kwargs = {} if x_tick_label_kwargs is None else \
x_tick_label_kwargs
y_tick_label_kwargs = {} if y_tick_label_kwargs is None else \
y_tick_label_kwargs
# Handle display of NaN values.
data_arr = data.values
masked_array = np.ma.array(data_arr, mask=np.isnan(data_arr))
cmap = imshow_kwargs.setdefault('cmap', plt.get_cmap('viridis'))
cmap.set_bad(nan_color)
# Handle kwargs for `imshow()`.
vmin, vmax = (np.min(possible_plot_values), np.max(possible_plot_values)) \
if possible_plot_values is not None else (np.nanmin(data), np.nanmax(data))
imshow_kwargs.setdefault('vmin', vmin)
imshow_kwargs.setdefault('vmax', vmax)
im = ax.imshow(masked_array, **imshow_kwargs)
# Set axis labels and tick labels.
xarray_set_axes_labels(data, ax, x_coord, y_coord,
x_label_kwargs, y_label_kwargs,
ax_tick_label_kwargs,
x_tick_label_kwargs, y_tick_label_kwargs)
# Set the title.
if title is not None:
title_kwargs = {} if title_kwargs is None else title_kwargs.copy()
ax.set_title(title, **title_kwargs)
# Create a colorbar.
if use_colorbar:
cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs.copy()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="7.5%", pad=0.05)
cbar = fig.colorbar(im, ax=ax, cax=cax, **cbar_kwargs)
if cbar_labels is not None:
cbar.ax.set_yticklabels(cbar_labels)
else:
cbar = None
# Create a legend.
if use_legend:
legend_kwargs = {} if legend_kwargs is None else legend_kwargs.copy()
legend_kwargs.setdefault("framealpha", 0.4)
# Determine the legend labels. If no set of values to create legend entries for
# is specified, use the unique values.
if possible_plot_values is None:
legend_values = np.unique(data.values)
legend_values = legend_values[~np.isnan(legend_values)]
else:
legend_values = possible_plot_values
if legend_labels is None:
legend_labels = ["{}".format(value) for value in legend_values]
else:
legend_labels = [legend_labels.get(value, "{}".format(value)) for value in legend_values]
colors = [im.cmap(value/np.max(legend_values)) for value in legend_values]
patches = [mpatches.Patch(color=colors[i], label=legend_labels[i])
for i in range(len(legend_values))]
legend_kwargs.setdefault('loc', 'best')
legend_kwargs['handles'] = patches
ax.legend(**legend_kwargs)
return fig, ax, im, cbar | [
"def",
"xarray_imshow",
"(",
"data",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"width",
"=",
"10",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"use_colorbar",
"=",
"True",
",",
"cbar_labels",
"=",
"None",
",",
"use_legend",
"=",
"False",
",",
"legend_labels",
"=",
"None",
",",
"fig_kwargs",
"=",
"None",
",",
"imshow_kwargs",
"=",
"None",
",",
"x_label_kwargs",
"=",
"None",
",",
"y_label_kwargs",
"=",
"None",
",",
"cbar_kwargs",
"=",
"None",
",",
"nan_color",
"=",
"'white'",
",",
"legend_kwargs",
"=",
"None",
",",
"ax_tick_label_kwargs",
"=",
"None",
",",
"x_tick_label_kwargs",
"=",
"None",
",",
"y_tick_label_kwargs",
"=",
"None",
",",
"title",
"=",
"None",
",",
"title_kwargs",
"=",
"None",
",",
"possible_plot_values",
"=",
"None",
")",
":",
"from",
"mpl_toolkits",
".",
"axes_grid1",
"import",
"make_axes_locatable",
"# Figure kwargs",
"# Use `copy()` to avoid modifying the original dictionaries.",
"fig_kwargs",
"=",
"{",
"}",
"if",
"fig_kwargs",
"is",
"None",
"else",
"fig_kwargs",
".",
"copy",
"(",
")",
"figsize",
"=",
"fig_kwargs",
".",
"setdefault",
"(",
"'figsize'",
",",
"figure_ratio",
"(",
"data",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"width",
")",
")",
"# Imshow kwargs",
"imshow_kwargs",
"=",
"{",
"}",
"if",
"imshow_kwargs",
"is",
"None",
"else",
"imshow_kwargs",
".",
"copy",
"(",
")",
"imshow_kwargs",
".",
"setdefault",
"(",
"'interpolation'",
",",
"'nearest'",
")",
"nan_color",
"=",
"norm_color",
"(",
"nan_color",
")",
"# Normalize color value for matplotlib.",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_kwargs",
")",
"axsize",
"=",
"get_ax_size",
"(",
"fig",
",",
"ax",
")",
"# Scale fonts on axis size, not figure size.",
"# Axis label kwargs",
"x_label_kwargs",
"=",
"{",
"}",
"if",
"x_label_kwargs",
"is",
"None",
"else",
"x_label_kwargs",
".",
"copy",
"(",
")",
"y_label_kwargs",
"=",
"{",
"}",
"if",
"y_label_kwargs",
"is",
"None",
"else",
"y_label_kwargs",
".",
"copy",
"(",
")",
"# Axis tick label kwargs",
"ax_tick_label_kwargs",
"=",
"{",
"}",
"if",
"ax_tick_label_kwargs",
"is",
"None",
"else",
"ax_tick_label_kwargs",
".",
"copy",
"(",
")",
"x_tick_label_kwargs",
"=",
"{",
"}",
"if",
"x_tick_label_kwargs",
"is",
"None",
"else",
"x_tick_label_kwargs",
"y_tick_label_kwargs",
"=",
"{",
"}",
"if",
"y_tick_label_kwargs",
"is",
"None",
"else",
"y_tick_label_kwargs",
"# Handle display of NaN values.",
"data_arr",
"=",
"data",
".",
"values",
"masked_array",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"data_arr",
",",
"mask",
"=",
"np",
".",
"isnan",
"(",
"data_arr",
")",
")",
"cmap",
"=",
"imshow_kwargs",
".",
"setdefault",
"(",
"'cmap'",
",",
"plt",
".",
"get_cmap",
"(",
"'viridis'",
")",
")",
"cmap",
".",
"set_bad",
"(",
"nan_color",
")",
"# Handle kwargs for `imshow()`.",
"vmin",
",",
"vmax",
"=",
"(",
"np",
".",
"min",
"(",
"possible_plot_values",
")",
",",
"np",
".",
"max",
"(",
"possible_plot_values",
")",
")",
"if",
"possible_plot_values",
"is",
"not",
"None",
"else",
"(",
"np",
".",
"nanmin",
"(",
"data",
")",
",",
"np",
".",
"nanmax",
"(",
"data",
")",
")",
"imshow_kwargs",
".",
"setdefault",
"(",
"'vmin'",
",",
"vmin",
")",
"imshow_kwargs",
".",
"setdefault",
"(",
"'vmax'",
",",
"vmax",
")",
"im",
"=",
"ax",
".",
"imshow",
"(",
"masked_array",
",",
"*",
"*",
"imshow_kwargs",
")",
"# Set axis labels and tick labels.",
"xarray_set_axes_labels",
"(",
"data",
",",
"ax",
",",
"x_coord",
",",
"y_coord",
",",
"x_label_kwargs",
",",
"y_label_kwargs",
",",
"ax_tick_label_kwargs",
",",
"x_tick_label_kwargs",
",",
"y_tick_label_kwargs",
")",
"# Set the title.",
"if",
"title",
"is",
"not",
"None",
":",
"title_kwargs",
"=",
"{",
"}",
"if",
"title_kwargs",
"is",
"None",
"else",
"title_kwargs",
".",
"copy",
"(",
")",
"ax",
".",
"set_title",
"(",
"title",
",",
"*",
"*",
"title_kwargs",
")",
"# Create a colorbar.",
"if",
"use_colorbar",
":",
"cbar_kwargs",
"=",
"{",
"}",
"if",
"cbar_kwargs",
"is",
"None",
"else",
"cbar_kwargs",
".",
"copy",
"(",
")",
"divider",
"=",
"make_axes_locatable",
"(",
"ax",
")",
"cax",
"=",
"divider",
".",
"append_axes",
"(",
"\"right\"",
",",
"size",
"=",
"\"7.5%\"",
",",
"pad",
"=",
"0.05",
")",
"cbar",
"=",
"fig",
".",
"colorbar",
"(",
"im",
",",
"ax",
"=",
"ax",
",",
"cax",
"=",
"cax",
",",
"*",
"*",
"cbar_kwargs",
")",
"if",
"cbar_labels",
"is",
"not",
"None",
":",
"cbar",
".",
"ax",
".",
"set_yticklabels",
"(",
"cbar_labels",
")",
"else",
":",
"cbar",
"=",
"None",
"# Create a legend.",
"if",
"use_legend",
":",
"legend_kwargs",
"=",
"{",
"}",
"if",
"legend_kwargs",
"is",
"None",
"else",
"legend_kwargs",
".",
"copy",
"(",
")",
"legend_kwargs",
".",
"setdefault",
"(",
"\"framealpha\"",
",",
"0.4",
")",
"# Determine the legend labels. If no set of values to create legend entries for",
"# is specified, use the unique values.",
"if",
"possible_plot_values",
"is",
"None",
":",
"legend_values",
"=",
"np",
".",
"unique",
"(",
"data",
".",
"values",
")",
"legend_values",
"=",
"legend_values",
"[",
"~",
"np",
".",
"isnan",
"(",
"legend_values",
")",
"]",
"else",
":",
"legend_values",
"=",
"possible_plot_values",
"if",
"legend_labels",
"is",
"None",
":",
"legend_labels",
"=",
"[",
"\"{}\"",
".",
"format",
"(",
"value",
")",
"for",
"value",
"in",
"legend_values",
"]",
"else",
":",
"legend_labels",
"=",
"[",
"legend_labels",
".",
"get",
"(",
"value",
",",
"\"{}\"",
".",
"format",
"(",
"value",
")",
")",
"for",
"value",
"in",
"legend_values",
"]",
"colors",
"=",
"[",
"im",
".",
"cmap",
"(",
"value",
"/",
"np",
".",
"max",
"(",
"legend_values",
")",
")",
"for",
"value",
"in",
"legend_values",
"]",
"patches",
"=",
"[",
"mpatches",
".",
"Patch",
"(",
"color",
"=",
"colors",
"[",
"i",
"]",
",",
"label",
"=",
"legend_labels",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"legend_values",
")",
")",
"]",
"legend_kwargs",
".",
"setdefault",
"(",
"'loc'",
",",
"'best'",
")",
"legend_kwargs",
"[",
"'handles'",
"]",
"=",
"patches",
"ax",
".",
"legend",
"(",
"*",
"*",
"legend_kwargs",
")",
"return",
"fig",
",",
"ax",
",",
"im",
",",
"cbar"
] | [
1737,
0
] | [
1901,
28
] | python | en | ['en', 'error', 'th'] | False |
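As a point of reference for the entry above, a minimal usage sketch follows. It assumes `xarray_imshow` (and the helper functions it calls) can be imported from the plotting utility module this row was extracted from; the grid and values are synthetic.

    import numpy as np
    import xarray as xr
    import matplotlib.pyplot as plt

    # Small synthetic field on a latitude/longitude grid, with a NaN patch
    # to exercise the nan_color handling described in the docstring.
    lats = np.linspace(-1.0, 1.0, 60)
    lons = np.linspace(35.0, 37.0, 80)
    values = np.random.rand(60, 80)
    values[10:20, 30:40] = np.nan
    field = xr.DataArray(values,
                         coords={'latitude': lats, 'longitude': lons},
                         dims=('latitude', 'longitude'))

    fig, ax, im, cbar = xarray_imshow(field,
                                      nan_color='lightgray',
                                      imshow_kwargs={'cmap': plt.get_cmap('magma')},
                                      title='Synthetic example')
    plt.show()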
convert_coords | (data) |
Takes in the dataframe of all WSC stations
and converts lat/lon/elevation to xyz for
more accurate distance measurements between
stations.
|
Takes in the dataframe of all WSC stations
and converts lat/lon/elevation to xyz for
more accurate distance measurements between
stations.
| def convert_coords(data):
"""
Takes in the dataframe of all WSC stations
and converts lat/lon/elevation to xyz for
more accurate distance measurements between
stations.
"""
data['Latitude'].dropna(inplace=True)
data['Longitude'].dropna(inplace=True)
data['Latitude'] = data['Latitude'].astype(
float)
data['Longitude'] = data['Longitude'].astype(
float)
data['dec_deg_latlon'] = data[[
'Latitude', 'Longitude']].values.tolist()
# convert decimal degrees to utm and make new columns for UTM Northing and Easting
data['utm_latlon'] = [utm.from_latlon(
e[0], e[1]) for e in data['dec_deg_latlon']]
data['utm_E'] = [e[0] for e in data['utm_latlon']]
data['utm_N'] = [e[1] for e in data['utm_latlon']]
xyz = pd.DataFrame()
xyz['r'] = 6378137 + data['Elevation']
xyz['x'] = xyz['r'] * \
np.cos(data['Latitude'].apply(deg2rad)) * \
np.cos(data['Longitude'].apply(deg2rad))
xyz['y'] = xyz['r'] * \
np.cos(data['Latitude'].apply(deg2rad)) * \
np.sin(data['Longitude'].apply(deg2rad))
xyz['z'] = xyz['r'] * \
np.sin(data['Latitude'].apply(deg2rad)) * (1 - 1 / 298.257223563)
data['xyz_coords'] = xyz[['x', 'y', 'z']].values.tolist()
return data | [
"def",
"convert_coords",
"(",
"data",
")",
":",
"data",
"[",
"'Latitude'",
"]",
".",
"dropna",
"(",
"inplace",
"=",
"True",
")",
"data",
"[",
"'Longitude'",
"]",
".",
"dropna",
"(",
"inplace",
"=",
"True",
")",
"data",
"[",
"'Latitude'",
"]",
"=",
"data",
"[",
"'Latitude'",
"]",
".",
"astype",
"(",
"float",
")",
"data",
"[",
"'Longitude'",
"]",
"=",
"data",
"[",
"'Longitude'",
"]",
".",
"astype",
"(",
"float",
")",
"data",
"[",
"'dec_deg_latlon'",
"]",
"=",
"data",
"[",
"[",
"'Latitude'",
",",
"'Longitude'",
"]",
"]",
".",
"values",
".",
"tolist",
"(",
")",
"# convert decimal degrees to utm and make new columns for UTM Northing and Easting",
"data",
"[",
"'utm_latlon'",
"]",
"=",
"[",
"utm",
".",
"from_latlon",
"(",
"e",
"[",
"0",
"]",
",",
"e",
"[",
"1",
"]",
")",
"for",
"e",
"in",
"data",
"[",
"'dec_deg_latlon'",
"]",
"]",
"data",
"[",
"'utm_E'",
"]",
"=",
"[",
"e",
"[",
"0",
"]",
"for",
"e",
"in",
"data",
"[",
"'utm_latlon'",
"]",
"]",
"data",
"[",
"'utm_N'",
"]",
"=",
"[",
"e",
"[",
"1",
"]",
"for",
"e",
"in",
"data",
"[",
"'utm_latlon'",
"]",
"]",
"xyz",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"xyz",
"[",
"'r'",
"]",
"=",
"6378137",
"+",
"data",
"[",
"'Elevation'",
"]",
"xyz",
"[",
"'x'",
"]",
"=",
"xyz",
"[",
"'r'",
"]",
"*",
"np",
".",
"cos",
"(",
"data",
"[",
"'Latitude'",
"]",
".",
"apply",
"(",
"deg2rad",
")",
")",
"*",
"np",
".",
"cos",
"(",
"data",
"[",
"'Longitude'",
"]",
".",
"apply",
"(",
"deg2rad",
")",
")",
"xyz",
"[",
"'y'",
"]",
"=",
"xyz",
"[",
"'r'",
"]",
"*",
"np",
".",
"cos",
"(",
"data",
"[",
"'Latitude'",
"]",
".",
"apply",
"(",
"deg2rad",
")",
")",
"*",
"np",
".",
"sin",
"(",
"data",
"[",
"'Longitude'",
"]",
".",
"apply",
"(",
"deg2rad",
")",
")",
"xyz",
"[",
"'z'",
"]",
"=",
"xyz",
"[",
"'r'",
"]",
"*",
"np",
".",
"sin",
"(",
"data",
"[",
"'Latitude'",
"]",
".",
"apply",
"(",
"deg2rad",
")",
")",
"*",
"(",
"1",
"-",
"1",
"/",
"298.257223563",
")",
"data",
"[",
"'xyz_coords'",
"]",
"=",
"xyz",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"]",
".",
"values",
".",
"tolist",
"(",
")",
"return",
"data"
] | [
22,
0
] | [
61,
15
] | python | en | ['en', 'error', 'th'] | False |
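A brief call sketch for the station-conversion row above; it assumes `convert_coords`, its module-level `deg2rad` helper, and the `utm` package are all available, and the two station records are invented for illustration.

    import pandas as pd

    stations = pd.DataFrame({
        'Station Number': ['08MH001', '08NM116'],   # hypothetical identifiers
        'Latitude': [49.25, 50.67],
        'Longitude': [-123.10, -120.35],
        'Elevation': [5.0, 345.0],
    })
    stations = convert_coords(stations)
    # New columns: utm_E / utm_N (UTM easting/northing) and xyz_coords
    # (earth-centred x, y, z used for distance calculations).
    print(stations[['utm_E', 'utm_N', 'xyz_coords']])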
BaseTLearner.__init__ | (self, learner=None, control_learner=None, treatment_learner=None, ate_alpha=.05, control_name=0) | Initialize a T-learner.
Args:
learner (model): a model to estimate control and treatment outcomes.
control_learner (model, optional): a model to estimate control outcomes
treatment_learner (model, optional): a model to estimate treatment outcomes
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a T-learner. | def __init__(self, learner=None, control_learner=None, treatment_learner=None, ate_alpha=.05, control_name=0):
"""Initialize a T-learner.
Args:
learner (model): a model to estimate control and treatment outcomes.
control_learner (model, optional): a model to estimate control outcomes
treatment_learner (model, optional): a model to estimate treatment outcomes
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
assert (learner is not None) or ((control_learner is not None) and (treatment_learner is not None))
if control_learner is None:
self.model_c = deepcopy(learner)
else:
self.model_c = control_learner
if treatment_learner is None:
self.model_t = deepcopy(learner)
else:
self.model_t = treatment_learner
self.ate_alpha = ate_alpha
self.control_name = control_name | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"control_learner",
"=",
"None",
",",
"treatment_learner",
"=",
"None",
",",
"ate_alpha",
"=",
".05",
",",
"control_name",
"=",
"0",
")",
":",
"assert",
"(",
"learner",
"is",
"not",
"None",
")",
"or",
"(",
"(",
"control_learner",
"is",
"not",
"None",
")",
"and",
"(",
"treatment_learner",
"is",
"not",
"None",
")",
")",
"if",
"control_learner",
"is",
"None",
":",
"self",
".",
"model_c",
"=",
"deepcopy",
"(",
"learner",
")",
"else",
":",
"self",
".",
"model_c",
"=",
"control_learner",
"if",
"treatment_learner",
"is",
"None",
":",
"self",
".",
"model_t",
"=",
"deepcopy",
"(",
"learner",
")",
"else",
":",
"self",
".",
"model_t",
"=",
"treatment_learner",
"self",
".",
"ate_alpha",
"=",
"ate_alpha",
"self",
".",
"control_name",
"=",
"control_name"
] | [
32,
4
] | [
55,
40
] | python | en | ['en', 'en', 'it'] | True |
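The constructor documented above accepts either one shared learner (deep-copied for the control and treatment arms) or separate per-arm learners. A hedged sketch, assuming `BaseTLearner` is importable from a causalml-style meta-learner module and using scikit-learn estimators as bases:

    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.linear_model import LinearRegression

    # One shared learner, copied internally for control and treatment.
    shared = BaseTLearner(learner=GradientBoostingRegressor())

    # Or a distinct model per arm.
    split = BaseTLearner(control_learner=LinearRegression(),
                         treatment_learner=GradientBoostingRegressor())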
BaseTLearner.fit | (self, X, treatment, y, p=None) | Fit the inference model
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
| Fit the inference model | def fit(self, X, treatment, y, p=None):
"""Fit the inference model
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
check_treatment_vector(treatment, self.control_name)
self.t_groups = np.unique(treatment[treatment != self.control_name])
self.t_groups.sort()
self._classes = {group: i for i, group in enumerate(self.t_groups)}
self.models_c = {group: deepcopy(self.model_c) for group in self.t_groups}
self.models_t = {group: deepcopy(self.model_t) for group in self.t_groups}
for group in self.t_groups:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
X_filt = X[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
self.models_c[group].fit(X_filt[w == 0], y_filt[w == 0])
self.models_t[group].fit(X_filt[w == 1], y_filt[w == 1]) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"check_treatment_vector",
"(",
"treatment",
",",
"self",
".",
"control_name",
")",
"self",
".",
"t_groups",
"=",
"np",
".",
"unique",
"(",
"treatment",
"[",
"treatment",
"!=",
"self",
".",
"control_name",
"]",
")",
"self",
".",
"t_groups",
".",
"sort",
"(",
")",
"self",
".",
"_classes",
"=",
"{",
"group",
":",
"i",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
"}",
"self",
".",
"models_c",
"=",
"{",
"group",
":",
"deepcopy",
"(",
"self",
".",
"model_c",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"self",
".",
"models_t",
"=",
"{",
"group",
":",
"deepcopy",
"(",
"self",
".",
"model_t",
")",
"for",
"group",
"in",
"self",
".",
"t_groups",
"}",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"X_filt",
"=",
"X",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"self",
".",
"models_c",
"[",
"group",
"]",
".",
"fit",
"(",
"X_filt",
"[",
"w",
"==",
"0",
"]",
",",
"y_filt",
"[",
"w",
"==",
"0",
"]",
")",
"self",
".",
"models_t",
"[",
"group",
"]",
".",
"fit",
"(",
"X_filt",
"[",
"w",
"==",
"1",
"]",
",",
"y_filt",
"[",
"w",
"==",
"1",
"]",
")"
] | [
63,
4
] | [
87,
68
] | python | en | ['en', 'en', 'en'] | True |
BaseTLearner.predict | (self, X, treatment=None, y=None, p=None, return_components=False, verbose=True) | Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| Predict treatment effects. | def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
"""Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
yhat_cs = {}
yhat_ts = {}
for group in self.t_groups:
model_c = self.models_c[group]
model_t = self.models_t[group]
yhat_cs[group] = model_c.predict(X)
yhat_ts[group] = model_t.predict(X)
if (y is not None) and (treatment is not None) and verbose:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
yhat = np.zeros_like(y_filt, dtype=float)
yhat[w == 0] = yhat_cs[group][mask][w == 0]
yhat[w == 1] = yhat_ts[group][mask][w == 1]
logger.info('Error metrics for group {}'.format(group))
regression_metrics(y_filt, yhat, w)
te = np.zeros((X.shape[0], self.t_groups.shape[0]))
for i, group in enumerate(self.t_groups):
te[:, i] = yhat_ts[group] - yhat_cs[group]
if not return_components:
return te
else:
return te, yhat_cs, yhat_ts | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"treatment",
"=",
"None",
",",
"y",
"=",
"None",
",",
"p",
"=",
"None",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"yhat_cs",
"=",
"{",
"}",
"yhat_ts",
"=",
"{",
"}",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"model_c",
"=",
"self",
".",
"models_c",
"[",
"group",
"]",
"model_t",
"=",
"self",
".",
"models_t",
"[",
"group",
"]",
"yhat_cs",
"[",
"group",
"]",
"=",
"model_c",
".",
"predict",
"(",
"X",
")",
"yhat_ts",
"[",
"group",
"]",
"=",
"model_t",
".",
"predict",
"(",
"X",
")",
"if",
"(",
"y",
"is",
"not",
"None",
")",
"and",
"(",
"treatment",
"is",
"not",
"None",
")",
"and",
"verbose",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"yhat",
"=",
"np",
".",
"zeros_like",
"(",
"y_filt",
",",
"dtype",
"=",
"float",
")",
"yhat",
"[",
"w",
"==",
"0",
"]",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"0",
"]",
"yhat",
"[",
"w",
"==",
"1",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"1",
"]",
"logger",
".",
"info",
"(",
"'Error metrics for group {}'",
".",
"format",
"(",
"group",
")",
")",
"regression_metrics",
"(",
"y_filt",
",",
"yhat",
",",
"w",
")",
"te",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"te",
"[",
":",
",",
"i",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"-",
"yhat_cs",
"[",
"group",
"]",
"if",
"not",
"return_components",
":",
"return",
"te",
"else",
":",
"return",
"te",
",",
"yhat_cs",
",",
"yhat_ts"
] | [
89,
4
] | [
131,
39
] | python | en | ['fr', 'en', 'en'] | True |
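To make the fit/predict flow above concrete, a sketch on synthetic data follows; the true treatment effect is 0.5 by construction, and `BaseTLearner` is again assumed importable as in the earlier sketch.

    import numpy as np
    from sklearn.linear_model import LinearRegression

    rng = np.random.default_rng(42)
    X = rng.normal(size=(1000, 5))
    treatment = rng.integers(0, 2, size=1000)   # 0 is the default control_name
    y = X[:, 0] + 0.5 * treatment + rng.normal(scale=0.1, size=1000)

    tlearner = BaseTLearner(learner=LinearRegression())
    tlearner.fit(X, treatment, y)
    cate = tlearner.predict(X)                  # shape (n_samples, n_treatment_groups)
    print(cate.mean(axis=0))                    # close to the simulated 0.5 effect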
BaseTLearner.fit_predict | (self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000,
return_components=False, verbose=True) | Fit the inference model of the T learner and predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment].
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
| Fit the inference model of the T learner and predict treatment effects. | def fit_predict(self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000,
return_components=False, verbose=True):
"""Fit the inference model of the T learner and predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
return_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
return_components (bool, optional): whether to return outcome for treatment and control separately
verbose (bool): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment].
If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
UB [n_samples, n_treatment]
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
self.fit(X, treatment, y)
te = self.predict(X, treatment, y, return_components=return_components)
if not return_ci:
return te
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_c_global = deepcopy(self.models_c)
models_t_global = deepcopy(self.models_t)
te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps))
logger.info('Bootstrap Confidence Intervals')
for i in tqdm(range(n_bootstraps)):
te_b = self.bootstrap(X, treatment, y, size=bootstrap_size)
te_bootstraps[:, :, i] = te_b
te_lower = np.percentile(te_bootstraps, (self.ate_alpha/2)*100, axis=2)
te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_c = deepcopy(models_c_global)
self.models_t = deepcopy(models_t_global)
return (te, te_lower, te_upper) | [
"def",
"fit_predict",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"return_ci",
"=",
"False",
",",
"n_bootstraps",
"=",
"1000",
",",
"bootstrap_size",
"=",
"10000",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"self",
".",
"fit",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"te",
"=",
"self",
".",
"predict",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"return_components",
"=",
"return_components",
")",
"if",
"not",
"return_ci",
":",
"return",
"te",
"else",
":",
"t_groups_global",
"=",
"self",
".",
"t_groups",
"_classes_global",
"=",
"self",
".",
"_classes",
"models_c_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_c",
")",
"models_t_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_t",
")",
"te_bootstraps",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
",",
"n_bootstraps",
")",
")",
"logger",
".",
"info",
"(",
"'Bootstrap Confidence Intervals'",
")",
"for",
"i",
"in",
"tqdm",
"(",
"range",
"(",
"n_bootstraps",
")",
")",
":",
"te_b",
"=",
"self",
".",
"bootstrap",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"size",
"=",
"bootstrap_size",
")",
"te_bootstraps",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"te_b",
"te_lower",
"=",
"np",
".",
"percentile",
"(",
"te_bootstraps",
",",
"(",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"2",
")",
"te_upper",
"=",
"np",
".",
"percentile",
"(",
"te_bootstraps",
",",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"2",
")",
"# set member variables back to global (currently last bootstrapped outcome)",
"self",
".",
"t_groups",
"=",
"t_groups_global",
"self",
".",
"_classes",
"=",
"_classes_global",
"self",
".",
"models_c",
"=",
"deepcopy",
"(",
"models_c_global",
")",
"self",
".",
"models_t",
"=",
"deepcopy",
"(",
"models_t_global",
")",
"return",
"(",
"te",
",",
"te_lower",
",",
"te_upper",
")"
] | [
133,
4
] | [
178,
43
] | python | en | ['en', 'en', 'en'] | True |
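Continuing the synthetic setup above, the bootstrap confidence-interval path can be exercised as below. This assumes the (unshown) base class provides the `bootstrap` helper that `fit_predict` calls; the bootstrap counts are kept small only to keep the sketch quick.

    te, te_lower, te_upper = tlearner.fit_predict(
        X, treatment, y,
        return_ci=True, n_bootstraps=50, bootstrap_size=500)
    print(te.shape, te_lower.shape, te_upper.shape)   # each (n_samples, n_treatment_groups)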
BaseTLearner.estimate_ate | (self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000) | Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
bootstrap_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
| Estimate the Average Treatment Effect (ATE). | def estimate_ate(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000):
"""Estimate the Average Treatment Effect (ATE).
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series): a treatment vector
y (np.array or pd.Series): an outcome vector
bootstrap_ci (bool): whether to return confidence intervals
n_bootstraps (int): number of bootstrap iterations
bootstrap_size (int): number of samples per bootstrap
Returns:
The mean and confidence interval (LB, UB) of the ATE estimate.
"""
X, treatment, y = convert_pd_to_np(X, treatment, y)
te, yhat_cs, yhat_ts = self.fit_predict(X, treatment, y, return_components=True)
ate = np.zeros(self.t_groups.shape[0])
ate_lb = np.zeros(self.t_groups.shape[0])
ate_ub = np.zeros(self.t_groups.shape[0])
for i, group in enumerate(self.t_groups):
_ate = te[:, i].mean()
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
prob_treatment = float(sum(w)) / w.shape[0]
yhat_c = yhat_cs[group][mask]
yhat_t = yhat_ts[group][mask]
se = np.sqrt((
(y_filt[w == 0] - yhat_c[w == 0]).var()
/ (1 - prob_treatment) +
(y_filt[w == 1] - yhat_t[w == 1]).var()
/ prob_treatment +
(yhat_t - yhat_c).var()
) / y_filt.shape[0])
_ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
_ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
ate[i] = _ate
ate_lb[i] = _ate_lb
ate_ub[i] = _ate_ub
if not bootstrap_ci:
return ate, ate_lb, ate_ub
else:
t_groups_global = self.t_groups
_classes_global = self._classes
models_c_global = deepcopy(self.models_c)
models_t_global = deepcopy(self.models_t)
logger.info('Bootstrap Confidence Intervals for ATE')
ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
for n in tqdm(range(n_bootstraps)):
ate_b = self.bootstrap(X, treatment, y, size=bootstrap_size)
ate_bootstraps[:, n] = ate_b.mean()
ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
ate_upper = np.percentile(ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1)
# set member variables back to global (currently last bootstrapped outcome)
self.t_groups = t_groups_global
self._classes = _classes_global
self.models_c = deepcopy(models_c_global)
self.models_t = deepcopy(models_t_global)
return ate, ate_lower, ate_upper | [
"def",
"estimate_ate",
"(",
"self",
",",
"X",
",",
"treatment",
",",
"y",
",",
"p",
"=",
"None",
",",
"bootstrap_ci",
"=",
"False",
",",
"n_bootstraps",
"=",
"1000",
",",
"bootstrap_size",
"=",
"10000",
")",
":",
"X",
",",
"treatment",
",",
"y",
"=",
"convert_pd_to_np",
"(",
"X",
",",
"treatment",
",",
"y",
")",
"te",
",",
"yhat_cs",
",",
"yhat_ts",
"=",
"self",
".",
"fit_predict",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"return_components",
"=",
"True",
")",
"ate",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_lb",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"ate_ub",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"_ate",
"=",
"te",
"[",
":",
",",
"i",
"]",
".",
"mean",
"(",
")",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"prob_treatment",
"=",
"float",
"(",
"sum",
"(",
"w",
")",
")",
"/",
"w",
".",
"shape",
"[",
"0",
"]",
"yhat_c",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"yhat_t",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"se",
"=",
"np",
".",
"sqrt",
"(",
"(",
"(",
"y_filt",
"[",
"w",
"==",
"0",
"]",
"-",
"yhat_c",
"[",
"w",
"==",
"0",
"]",
")",
".",
"var",
"(",
")",
"/",
"(",
"1",
"-",
"prob_treatment",
")",
"+",
"(",
"y_filt",
"[",
"w",
"==",
"1",
"]",
"-",
"yhat_t",
"[",
"w",
"==",
"1",
"]",
")",
".",
"var",
"(",
")",
"/",
"prob_treatment",
"+",
"(",
"yhat_t",
"-",
"yhat_c",
")",
".",
"var",
"(",
")",
")",
"/",
"y_filt",
".",
"shape",
"[",
"0",
"]",
")",
"_ate_lb",
"=",
"_ate",
"-",
"se",
"*",
"norm",
".",
"ppf",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"_ate_ub",
"=",
"_ate",
"+",
"se",
"*",
"norm",
".",
"ppf",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"ate",
"[",
"i",
"]",
"=",
"_ate",
"ate_lb",
"[",
"i",
"]",
"=",
"_ate_lb",
"ate_ub",
"[",
"i",
"]",
"=",
"_ate_ub",
"if",
"not",
"bootstrap_ci",
":",
"return",
"ate",
",",
"ate_lb",
",",
"ate_ub",
"else",
":",
"t_groups_global",
"=",
"self",
".",
"t_groups",
"_classes_global",
"=",
"self",
".",
"_classes",
"models_c_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_c",
")",
"models_t_global",
"=",
"deepcopy",
"(",
"self",
".",
"models_t",
")",
"logger",
".",
"info",
"(",
"'Bootstrap Confidence Intervals for ATE'",
")",
"ate_bootstraps",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
",",
"n_bootstraps",
")",
")",
"for",
"n",
"in",
"tqdm",
"(",
"range",
"(",
"n_bootstraps",
")",
")",
":",
"ate_b",
"=",
"self",
".",
"bootstrap",
"(",
"X",
",",
"treatment",
",",
"y",
",",
"size",
"=",
"bootstrap_size",
")",
"ate_bootstraps",
"[",
":",
",",
"n",
"]",
"=",
"ate_b",
".",
"mean",
"(",
")",
"ate_lower",
"=",
"np",
".",
"percentile",
"(",
"ate_bootstraps",
",",
"(",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"1",
")",
"ate_upper",
"=",
"np",
".",
"percentile",
"(",
"ate_bootstraps",
",",
"(",
"1",
"-",
"self",
".",
"ate_alpha",
"/",
"2",
")",
"*",
"100",
",",
"axis",
"=",
"1",
")",
"# set member variables back to global (currently last bootstrapped outcome)",
"self",
".",
"t_groups",
"=",
"t_groups_global",
"self",
".",
"_classes",
"=",
"_classes_global",
"self",
".",
"models_c",
"=",
"deepcopy",
"(",
"models_c_global",
")",
"self",
".",
"models_t",
"=",
"deepcopy",
"(",
"models_t_global",
")",
"return",
"ate",
",",
"ate_lower",
",",
"ate_upper"
] | [
180,
4
] | [
251,
44
] | python | en | ['en', 'it', 'en'] | True |
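The analytic ATE interval documented above can then be obtained from the same illustrative data and learner:

    ate, ate_lb, ate_ub = tlearner.estimate_ate(X, treatment, y)
    print(ate, ate_lb, ate_ub)   # point estimate with lower/upper bounds per treatment group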
BaseTRegressor.__init__ | (self,
learner=None,
control_learner=None,
treatment_learner=None,
ate_alpha=.05,
control_name=0) | Initialize a T-learner regressor.
Args:
learner (model): a model to estimate control and treatment outcomes.
control_learner (model, optional): a model to estimate control outcomes
treatment_learner (model, optional): a model to estimate treatment outcomes
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a T-learner regressor. | def __init__(self,
learner=None,
control_learner=None,
treatment_learner=None,
ate_alpha=.05,
control_name=0):
"""Initialize a T-learner regressor.
Args:
learner (model): a model to estimate control and treatment outcomes.
control_learner (model, optional): a model to estimate control outcomes
treatment_learner (model, optional): a model to estimate treatment outcomes
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
super().__init__(
learner=learner,
control_learner=control_learner,
treatment_learner=treatment_learner,
ate_alpha=ate_alpha,
control_name=control_name) | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"control_learner",
"=",
"None",
",",
"treatment_learner",
"=",
"None",
",",
"ate_alpha",
"=",
".05",
",",
"control_name",
"=",
"0",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"learner",
",",
"control_learner",
"=",
"control_learner",
",",
"treatment_learner",
"=",
"treatment_learner",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
")"
] | [
259,
4
] | [
279,
38
] | python | co | ['en', 'co', 'it'] | False |
BaseTClassifier.__init__ | (self,
learner=None,
control_learner=None,
treatment_learner=None,
ate_alpha=.05,
control_name=0) | Initialize a T-learner classifier.
Args:
learner (model): a model to estimate control and treatment outcomes.
control_learner (model, optional): a model to estimate control outcomes
treatment_learner (model, optional): a model to estimate treatment outcomes
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
| Initialize a T-learner classifier. | def __init__(self,
learner=None,
control_learner=None,
treatment_learner=None,
ate_alpha=.05,
control_name=0):
"""Initialize a T-learner classifier.
Args:
learner (model): a model to estimate control and treatment outcomes.
control_learner (model, optional): a model to estimate control outcomes
treatment_learner (model, optional): a model to estimate treatment outcomes
ate_alpha (float, optional): the confidence level alpha of the ATE estimate
control_name (str or int, optional): name of control group
"""
super().__init__(
learner=learner,
control_learner=control_learner,
treatment_learner=treatment_learner,
ate_alpha=ate_alpha,
control_name=control_name) | [
"def",
"__init__",
"(",
"self",
",",
"learner",
"=",
"None",
",",
"control_learner",
"=",
"None",
",",
"treatment_learner",
"=",
"None",
",",
"ate_alpha",
"=",
".05",
",",
"control_name",
"=",
"0",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"learner",
",",
"control_learner",
"=",
"control_learner",
",",
"treatment_learner",
"=",
"treatment_learner",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
")"
] | [
287,
4
] | [
307,
38
] | python | en | ['en', 'fy', 'en'] | True |
BaseTClassifier.predict | (self, X, treatment=None, y=None, p=None, return_components=False, verbose=True) | Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
| Predict treatment effects. | def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True):
"""Predict treatment effects.
Args:
X (np.matrix or np.array or pd.Dataframe): a feature matrix
treatment (np.array or pd.Series, optional): a treatment vector
y (np.array or pd.Series, optional): an outcome vector
verbose (bool, optional): whether to output progress logs
Returns:
(numpy.ndarray): Predictions of treatment effects.
"""
yhat_cs = {}
yhat_ts = {}
for group in self.t_groups:
model_c = self.models_c[group]
model_t = self.models_t[group]
yhat_cs[group] = model_c.predict_proba(X)[:, 1]
yhat_ts[group] = model_t.predict_proba(X)[:, 1]
if (y is not None) and (treatment is not None) and verbose:
mask = (treatment == group) | (treatment == self.control_name)
treatment_filt = treatment[mask]
y_filt = y[mask]
w = (treatment_filt == group).astype(int)
yhat = np.zeros_like(y_filt, dtype=float)
yhat[w == 0] = yhat_cs[group][mask][w == 0]
yhat[w == 1] = yhat_ts[group][mask][w == 1]
logger.info('Error metrics for group {}'.format(group))
classification_metrics(y_filt, yhat, w)
te = np.zeros((X.shape[0], self.t_groups.shape[0]))
for i, group in enumerate(self.t_groups):
te[:, i] = yhat_ts[group] - yhat_cs[group]
if not return_components:
return te
else:
return te, yhat_cs, yhat_ts | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"treatment",
"=",
"None",
",",
"y",
"=",
"None",
",",
"p",
"=",
"None",
",",
"return_components",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"yhat_cs",
"=",
"{",
"}",
"yhat_ts",
"=",
"{",
"}",
"for",
"group",
"in",
"self",
".",
"t_groups",
":",
"model_c",
"=",
"self",
".",
"models_c",
"[",
"group",
"]",
"model_t",
"=",
"self",
".",
"models_t",
"[",
"group",
"]",
"yhat_cs",
"[",
"group",
"]",
"=",
"model_c",
".",
"predict_proba",
"(",
"X",
")",
"[",
":",
",",
"1",
"]",
"yhat_ts",
"[",
"group",
"]",
"=",
"model_t",
".",
"predict_proba",
"(",
"X",
")",
"[",
":",
",",
"1",
"]",
"if",
"(",
"y",
"is",
"not",
"None",
")",
"and",
"(",
"treatment",
"is",
"not",
"None",
")",
"and",
"verbose",
":",
"mask",
"=",
"(",
"treatment",
"==",
"group",
")",
"|",
"(",
"treatment",
"==",
"self",
".",
"control_name",
")",
"treatment_filt",
"=",
"treatment",
"[",
"mask",
"]",
"y_filt",
"=",
"y",
"[",
"mask",
"]",
"w",
"=",
"(",
"treatment_filt",
"==",
"group",
")",
".",
"astype",
"(",
"int",
")",
"yhat",
"=",
"np",
".",
"zeros_like",
"(",
"y_filt",
",",
"dtype",
"=",
"float",
")",
"yhat",
"[",
"w",
"==",
"0",
"]",
"=",
"yhat_cs",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"0",
"]",
"yhat",
"[",
"w",
"==",
"1",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"[",
"mask",
"]",
"[",
"w",
"==",
"1",
"]",
"logger",
".",
"info",
"(",
"'Error metrics for group {}'",
".",
"format",
"(",
"group",
")",
")",
"classification_metrics",
"(",
"y_filt",
",",
"yhat",
",",
"w",
")",
"te",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"t_groups",
".",
"shape",
"[",
"0",
"]",
")",
")",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"t_groups",
")",
":",
"te",
"[",
":",
",",
"i",
"]",
"=",
"yhat_ts",
"[",
"group",
"]",
"-",
"yhat_cs",
"[",
"group",
"]",
"if",
"not",
"return_components",
":",
"return",
"te",
"else",
":",
"return",
"te",
",",
"yhat_cs",
",",
"yhat_ts"
] | [
309,
4
] | [
349,
39
] | python | en | ['fr', 'en', 'en'] | True |
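For the classifier variant, the same pattern applies with a probabilistic base learner; the sketch below assumes `BaseTClassifier` is importable alongside `BaseTLearner` and reuses the earlier synthetic covariates with a binarised outcome.

    from sklearn.linear_model import LogisticRegression

    y_binary = (y > np.median(y)).astype(int)
    tclassifier = BaseTClassifier(learner=LogisticRegression(max_iter=1000))
    tclassifier.fit(X, treatment, y_binary)
    cate_proba = tclassifier.predict(X)   # per-group difference of predicted probabilities
    print(cate_proba[:5])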
XGBTRegressor.__init__ | (self, ate_alpha=.05, control_name=0, *args, **kwargs) | Initialize a T-learner with two XGBoost models. | Initialize a T-learner with two XGBoost models. | def __init__(self, ate_alpha=.05, control_name=0, *args, **kwargs):
"""Initialize a T-learner with two XGBoost models."""
super().__init__(learner=XGBRegressor(*args, **kwargs),
ate_alpha=ate_alpha,
control_name=control_name) | [
"def",
"__init__",
"(",
"self",
",",
"ate_alpha",
"=",
".05",
",",
"control_name",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"XGBRegressor",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
")"
] | [
353,
4
] | [
357,
51
] | python | en | ['en', 'en', 'en'] | True |
MLPTRegressor.__init__ | (self, ate_alpha=.05, control_name=0, *args, **kwargs) | Initialize a T-learner with two MLP models. | Initialize a T-learner with two MLP models. | def __init__(self, ate_alpha=.05, control_name=0, *args, **kwargs):
"""Initialize a T-learner with two MLP models."""
super().__init__(learner=MLPRegressor(*args, **kwargs),
ate_alpha=ate_alpha,
control_name=control_name) | [
"def",
"__init__",
"(",
"self",
",",
"ate_alpha",
"=",
".05",
",",
"control_name",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"learner",
"=",
"MLPRegressor",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"ate_alpha",
"=",
"ate_alpha",
",",
"control_name",
"=",
"control_name",
")"
] | [
361,
4
] | [
365,
51
] | python | en | ['en', 'en', 'en'] | True |
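The two convenience wrappers above simply forward their extra keyword arguments to the underlying `XGBRegressor` and `MLPRegressor`, so instantiation looks like the following (parameter values are arbitrary examples, and the synthetic data from the earlier sketches are reused):

    xgb_tlearner = XGBTRegressor(n_estimators=200, max_depth=3, learning_rate=0.1)
    mlp_tlearner = MLPTRegressor(hidden_layer_sizes=(64, 64), max_iter=500)

    xgb_tlearner.fit(X, treatment, y)
    ate, ate_lb, ate_ub = mlp_tlearner.estimate_ate(X, treatment, y)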