blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
ecf02569e1eed908e82519db1a07d43581f22119 | cd4be8b6bee2964d063b332c0c8784ab6c89c8e5 | /opacus/layers/dp_rnn.py | 5f5c681988cdae0f1f9b9ff71207e8204edf7691 | [
"Apache-2.0"
]
| permissive | pytorch/opacus | d55f9c3627943a3c067528849401663cfaf7d622 | 79bdfac28afb526430a938d38513c46936f8670a | refs/heads/main | 2023-09-04T01:03:50.533043 | 2023-08-01T19:37:56 | 2023-08-01T19:37:56 | 226,441,159 | 1,358 | 291 | Apache-2.0 | 2023-09-11T13:29:37 | 2019-12-07T01:58:09 | Jupyter Notebook | UTF-8 | Python | false | false | 25,884 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numbers
import warnings
from typing import List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.utils.rnn import PackedSequence
from ..utils.packed_sequences import compute_seq_lengths
from .param_rename import RenameParamsMixin
def apply_permutation(tensor: Tensor, dim: int, permutation: Optional[Tensor]):
"""
Permute elements of a tensor along a dimension `dim`. If permutation is None do nothing.
"""
if permutation is None:
return tensor
return tensor.index_select(dim, permutation)
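# Editor's sketch (illustrative, not part of the original module): permuting the
# batch dimension of a hidden-state tensor, as done for PackedSequence inputs below.
#
# >>> h = torch.arange(6.).reshape(1, 3, 2)   # [L*P, B, H] with B = 3
# >>> perm = torch.tensor([2, 0, 1])
# >>> apply_permutation(h, 1, perm)[0, 0]
# tensor([4., 5.])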
class RNNLinear(nn.Linear):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
This module is the same as a ``torch.nn.Linear`` layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear).
When used with ``PackedSequence``s, an additional attribute ``max_batch_len`` is defined to determine
the size of the per-sample grad tensor.
"""
max_batch_len: int
def __init__(self, in_features: int, out_features: int, bias: bool = True):
super().__init__(in_features, out_features, bias)
class DPRNNCellBase(nn.Module):
has_cell_state: bool = False
def __init__(
self, input_size: int, hidden_size: int, bias: bool, num_chunks: int
) -> None:
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = RNNLinear(input_size, num_chunks * hidden_size, bias)
self.hh = RNNLinear(hidden_size, num_chunks * hidden_size, bias)
self.reset_parameters()
def reset_parameters(self) -> None:
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: int) -> None:
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
class DPRNNCell(DPRNNCellBase):
"""An Elman RNN cell with tanh or ReLU non-linearity.
DP-friendly drop-in replacement of the ``torch.nn.RNNCell`` module to use in ``DPRNN``.
Refer to ``torch.nn.RNNCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(
self, input_size: int, hidden_size: int, bias: bool, nonlinearity: str = "tanh"
) -> None:
super().__init__(input_size, hidden_size, bias, num_chunks=1)
if nonlinearity not in ("tanh", "relu"):
raise ValueError(f"Unsupported nonlinearity: {nonlinearity}")
self.nonlinearity = nonlinearity
def forward(
self,
input: Tensor,
hx: Optional[Tensor] = None,
batch_size_t: Optional[int] = None,
) -> Tensor:
if hx is None:
hx = torch.zeros(
input.shape[0], self.hidden_size, dtype=input.dtype, device=input.device
)
h_prev = hx
gates = self.ih(input) + self.hh(
h_prev if batch_size_t is None else h_prev[:batch_size_t, :]
)
if self.nonlinearity == "tanh":
h_t = torch.tanh(gates)
elif self.nonlinearity == "relu":
h_t = torch.relu(gates)
else:
raise RuntimeError(f"Unknown nonlinearity: {self.nonlinearity}")
return h_t
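# Reference (editor's note, not in the original source): with the default tanh
# nonlinearity the cell above computes the standard Elman update
#   h_t = tanh(W_ih x_t + b_ih + W_hh h_{t-1} + b_hh)
# (relu replaces tanh when nonlinearity="relu").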
class DPGRUCell(DPRNNCellBase):
"""A gated recurrent unit (GRU) cell
DP-friendly drop-in replacement of the ``torch.nn.GRUCell`` module to use in ``DPGRU``.
Refer to ``torch.nn.GRUCell`` documentation for the model description, parameters and inputs/outputs.
"""
def __init__(self, input_size: int, hidden_size: int, bias: bool) -> None:
super().__init__(input_size, hidden_size, bias, num_chunks=3)
def forward(
self,
input: Tensor,
hx: Optional[Tensor] = None,
batch_size_t: Optional[int] = None,
) -> Tensor:
if hx is None:
hx = torch.zeros(
input.shape[0], self.hidden_size, dtype=input.dtype, device=input.device
)
h_prev = hx if batch_size_t is None else hx[:batch_size_t, :]
gates_x = self.ih(input)
gates_h = self.hh(h_prev)
r_t_input_x, z_t_input_x, n_t_input_x = torch.split(
gates_x, self.hidden_size, 1
)
r_t_input_h, z_t_input_h, n_t_input_h = torch.split(
gates_h, self.hidden_size, 1
)
r_t = torch.sigmoid(r_t_input_x + r_t_input_h)
z_t = torch.sigmoid(z_t_input_x + z_t_input_h)
n_t = torch.tanh(n_t_input_x + r_t * n_t_input_h)
h_t = (1 - z_t) * n_t + z_t * h_prev
return h_t
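# Reference (editor's note, not in the original source): the code above follows the
# standard GRU update, with biases folded into self.ih / self.hh when bias=True:
#   r_t = sigmoid(W_ir x_t + W_hr h_{t-1})
#   z_t = sigmoid(W_iz x_t + W_hz h_{t-1})
#   n_t = tanh(W_in x_t + r_t * (W_hn h_{t-1}))
#   h_t = (1 - z_t) * n_t + z_t * h_{t-1}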
class DPLSTMCell(DPRNNCellBase):
"""A long short-term memory (LSTM) cell.
DP-friendly drop-in replacement of the ``torch.nn.LSTMCell`` module to use in ``DPLSTM``.
Refer to ``torch.nn.LSTMCell`` documentation for the model description, parameters and inputs/outputs.
"""
has_cell_state = True
def __init__(self, input_size: int, hidden_size: int, bias: bool) -> None:
super().__init__(input_size, hidden_size, bias, num_chunks=4)
def forward(
self,
input: Tensor,
hx: Optional[Tuple[Tensor, Tensor]] = None,
batch_size_t: Optional[int] = None,
) -> Tuple[Tensor, Tensor]:
if hx is None:
zeros = torch.zeros(
input.shape[0], self.hidden_size, dtype=input.dtype, device=input.device
)
hx = (zeros, zeros)
h_prev, c_prev = hx
if batch_size_t is None:
gates = self.ih(input) + self.hh(h_prev) # [B, 4*D]
else:
gates = self.ih(input) + self.hh(
h_prev[:batch_size_t, :]
) # [batch_size_t, 4*D]
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(
gates, self.hidden_size, 1
)
# [B, D] or [batch_size_t, D] if batch_size_t is not None
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
RNN_CELL_TYPES = {
"RNN_TANH": (DPRNNCell, {"nonlinearity": "tanh"}),
"RNN_RELU": (DPRNNCell, {"nonlinearity": "relu"}),
"GRU": (DPGRUCell, {}),
"LSTM": (DPLSTMCell, {}),
}
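# Editor's sketch (illustrative, not part of the original module): DPRNNBase resolves
# a string mode through this table into a cell class plus default kwargs, e.g.
#
# >>> cell_cls, params = RNN_CELL_TYPES["RNN_TANH"]
# >>> cell_cls(input_size=8, hidden_size=16, bias=True, **params).nonlinearity
# 'tanh'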
class DPRNNBase(RenameParamsMixin, nn.Module):
"""Base class for all RNN-like sequence models.
DP-friendly drop-in replacement of the ``torch.nn.RNNBase`` module.
After training this module can be exported and loaded by the original ``torch.nn``
implementation for inference.
This module implements a multi-layer (Type-2, see
[this issue](https://github.com/pytorch/pytorch/issues/4930#issuecomment-361851298))
bi-directional sequential model based on an abstract cell.
The cell should be a subclass of ``DPRNNCellBase``.
Limitations:
- proj_size > 0 is not implemented
- this implementation doesn't use cuDNN
"""
def __init__(
self,
mode: Union[str, Type[DPRNNCellBase]],
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
batch_first: bool = False,
dropout: float = 0.0,
bidirectional: bool = False,
proj_size: int = 0,
cell_params: Optional[dict] = None,
) -> None:
super().__init__()
self.cell_params = {}
if isinstance(mode, str):
if mode not in RNN_CELL_TYPES:
raise ValueError(
f"Invalid RNN mode '{mode}', available options: {list(RNN_CELL_TYPES.keys())}"
)
self.cell_type, default_params = RNN_CELL_TYPES[mode]
self.cell_params.update(default_params)
else:
self.cell_type = mode
if cell_params is not None:
self.cell_params.update(cell_params)
self.has_cell_state = self.cell_type.has_cell_state
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = float(dropout)
self.bidirectional = bidirectional
self.proj_size = proj_size
self.num_directions = 2 if bidirectional else 1
if (
not isinstance(dropout, numbers.Number)
or not 0 <= dropout <= 1
or isinstance(dropout, bool)
):
raise ValueError(
"dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed"
)
if dropout > 0 and num_layers == 1:
warnings.warn(
"dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers)
)
if proj_size > 0:
raise NotImplementedError("proj_size > 0 is not supported")
if proj_size < 0:
raise ValueError(
"proj_size should be a positive integer or zero to disable projections"
)
if proj_size >= hidden_size:
raise ValueError("proj_size has to be smaller than hidden_size")
self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None
self.cells = self.initialize_cells()
# flake8: noqa C901
def forward(
self,
input: Union[Tensor, PackedSequence],
state_init: Optional[Union[Tensor, Tuple[Tensor, Tensor]]] = None,
) -> Tuple[Union[Tensor, PackedSequence], Union[Tensor, Tuple[Tensor, Tensor]]]:
"""
Forward pass of a full RNN, containing one or many single- or bi-directional layers.
Implemented for an abstract cell type.
Note: ``proj_size > 0`` is not supported here.
Cell state size is always equal to hidden state size.
Inputs: input, h_0/(h_0, c_0)
input: Input sequence. Tensor of shape ``[T, B, D]`` (``[B, T, D]`` if ``batch_first=True``)
or PackedSequence.
h_0: Initial hidden state for each element in the batch. Tensor of shape ``[L*P, B, H]``. Defaults to zeros.
c_0: Initial cell state for each element in the batch. Only for cell types with an additional state.
Tensor of shape ``[L*P, B, H]``. Defaults to zeros.
Outputs: output, h_n/(h_n, c_n)
output: Output features (``h_t``) from the last layer of the model for each ``t``. Tensor of
shape ``[T, B, P*H]`` (``[B, T, P*H]`` if ``batch_first=True``), or PackedSequence.
h_n: Final hidden state for each element in the batch. Tensor of shape ``[L*P, B, H]``.
c_n: Final cell state for each element in the batch. Tensor of shape ``[L*P, B, H]``.
where
T = sequence length
B = batch size
D = input_size
H = hidden_size
L = num_layers
P = num_directions (2 if `bidirectional=True` else 1)
"""
num_directions = 2 if self.bidirectional else 1
is_packed = isinstance(input, PackedSequence)
if is_packed:
input_data, batch_sizes, sorted_indices, unsorted_indices = input
dtype, device = input_data.dtype, input_data.device
x = input_data.split(tuple(batch_sizes)) # tuple T x [B, D]
seq_length = len(batch_sizes)
max_batch_size = int(batch_sizes[0])
for cell in self.cells:
cell.set_max_batch_length(max_batch_size)
else:
dtype, device = input.dtype, input.device
batch_sizes = None
sorted_indices = None
unsorted_indices = None
# Rearrange batch dim. Batch is by default in second dimension.
if self.batch_first:
input = input.transpose(0, 1)
x = input # [T, B, D]
seq_length = x.shape[0]
max_batch_size = x.shape[1]
if self.has_cell_state:
h_0s, c_0s = state_init or (None, None)
else:
h_0s, c_0s = state_init, None
if h_0s is None:
h_0s = torch.zeros( # [L*P, B, H]
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=dtype,
device=device,
)
else:
h_0s = apply_permutation(h_0s, 1, sorted_indices)
if self.has_cell_state:
if c_0s is None:
c_0s = torch.zeros( # [L*P, B, H]
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=dtype,
device=device,
)
else:
c_0s = apply_permutation(c_0s, 1, sorted_indices)
else:
c_0s = [None] * len(h_0s)
hs = []
cs = [] # list of None if no cell state
output = None
for layer, directions in self.iterate_layers(self.cells, h_0s, c_0s):
layer_outs = []
for direction, (cell, h0, c0) in directions:
# apply single direction layer (with dropout)
out_layer, h, c = self.forward_layer(
x
if layer == 0
else output, # [T, B, D/H/2H] / tuple T x [B, D/H/2H]
h0, # [B, H]
c0,
batch_sizes,
cell=cell,
max_batch_size=max_batch_size,
seq_length=seq_length,
is_packed=is_packed,
reverse_layer=(direction == 1),
)
hs.append(h) # h: [B, H]
cs.append(c)
layer_outs.append(out_layer) # out_layer: [T, B, H] / tuple T x [B, H]
if is_packed:
output = [ # tuple T x [B, P*H]
torch.cat([layer_out[i] for layer_out in layer_outs], dim=1)
for i in range(seq_length)
]
else:
output = torch.cat(layer_outs, dim=2) # [T, B, P*H]
if is_packed:
packed_data = torch.cat(output, dim=0) # [TB, P*H]
output = PackedSequence(
packed_data, batch_sizes, sorted_indices, unsorted_indices
)
else:
# Rearrange batch dim back
if self.batch_first:
output = output.transpose(0, 1)
hs = torch.stack(hs, dim=0).to(device) # [L*P, B, H]
hs = apply_permutation(hs, 1, unsorted_indices)
if self.has_cell_state:
cs = torch.stack(cs, dim=0).to(device) # [L*P, B, H]
cs = apply_permutation(cs, 1, unsorted_indices)
hidden = (hs, cs) if self.has_cell_state else hs
return output, hidden
# flake8: noqa C901
def forward_layer(
self,
x: Union[Tensor, PackedSequence],
h_0: Tensor,
c_0: Optional[Tensor],
batch_sizes: Tensor,
cell: DPRNNCellBase,
max_batch_size: int,
seq_length: int,
is_packed: bool,
reverse_layer: bool,
) -> Tuple[Union[Tensor, List[Tensor]], Tensor, Tensor]:
"""
Forward pass of a single RNN layer (one direction). Implemented for an abstract cell type.
Inputs: x, h_0, c_0
x: Input sequence. Tensor of shape ``[T, B, D]`` or PackedSequence if `is_packed = True`.
h_0: Initial hidden state. Tensor of shape ``[B, H]``.
c_0: Initial cell state. Tensor of shape ``[B, H]``. Only for cells with additional
state `c_t`, e.g. DPLSTMCell.
Outputs: h_t, h_last, c_last
h_t: Final hidden state, output features (``h_t``) for each timestep ``t``. Tensor of
shape ``[T, B, H]`` or list of length ``T`` with tensors ``[B, H]`` if PackedSequence is used.
h_last: The last hidden state. Tensor of shape ``[B, H]``.
c_last: The last cell state. Tensor of shape ``[B, H]``. None if cell has no additional state.
where
T = sequence length
B = batch size
D = input_size (for this specific layer)
H = hidden_size (output size, for this specific layer)
Args:
batch_sizes: Contains the batch sizes as stored in PackedSequence
cell: Module implementing a single cell of the network, must be an instance of DPRNNCell
max_batch_size: maximum batch size (largest number of sequences at any timestep)
seq_length: sequence length
is_packed: whether PackedSequence is used as input
reverse_layer: if True, it will run forward pass for a reversed layer
"""
if is_packed:
if reverse_layer:
x = tuple(reversed(x))
batch_sizes = batch_sizes.flip(0)
else:
if reverse_layer:
x = x.flip(0)
x = torch.unbind(x, dim=0)
h_n = [h_0]
c_n = [c_0]
c_next = c_0
batch_size_prev = h_0.shape[0]
for t in range(seq_length):
if is_packed:
batch_size_t = batch_sizes[t].item()
delta = batch_size_t - batch_size_prev
if delta > 0:
h_cat = torch.cat((h_n[t], h_0[batch_size_prev:batch_size_t, :]), 0)
if self.has_cell_state:
c_cat = torch.cat(
(c_n[t], c_0[batch_size_prev:batch_size_t, :]), 0
)
h_next, c_next = cell(x[t], (h_cat, c_cat), batch_size_t)
else:
h_next = cell(x[t], h_cat, batch_size_t)
else:
if self.has_cell_state:
h_next, c_next = cell(x[t], (h_n[t], c_n[t]), batch_size_t)
else:
h_next = cell(x[t], h_n[t], batch_size_t)
else:
if self.has_cell_state:
h_next, c_next = cell(x[t], (h_n[t], c_n[t]))
else:
h_next = cell(x[t], h_n[t])
if self.dropout:
h_next = self.dropout_layer(h_next)
h_n.append(h_next)
c_n.append(c_next)
batch_size_prev = h_next.shape[0]
if is_packed:
h_temp = h_n[1:] # list T x [B, H]
c_temp = c_n[1:]
# Collect last states for all sequences
seq_lengths = compute_seq_lengths(batch_sizes)
h_last = torch.zeros(max_batch_size, self.hidden_size) # [B, H]
c_last = (
torch.zeros(max_batch_size, self.hidden_size)
if self.has_cell_state
else None
)
for i, seq_len in enumerate(seq_lengths):
h_last[i, :] = h_temp[seq_len - 1][i, :]
if self.has_cell_state:
c_last[i, :] = c_temp[seq_len - 1][i, :]
if reverse_layer:
h_temp = tuple(reversed(h_temp))
else:
h_n = torch.stack(h_n[1:], dim=0) # [T, B, H], init step not part of output
h_temp = h_n if not reverse_layer else h_n.flip(0) # Flip the output...
h_last = h_n[-1] # ... But not the states
c_last = c_n[-1]
return h_temp, h_last, c_last
def iterate_layers(self, *args):
"""
Iterate through all the layers and through all directions within each layer.
Arguments should be list-like of length ``num_layers * num_directions`` where
each element corresponds to (layer, direction) pair. The corresponding elements
of each of these lists will be iterated over.
Example:
num_layers = 3
bidirectional = True
for layer, directions in self.iterate_layers(self.cell, h):
for dir, (cell, hi) in directions:
print(layer, dir, hi)
# 0 0 h[0]
# 0 1 h[1]
# 1 0 h[2]
# 1 1 h[3]
# 2 0 h[4]
# 2 1 h[5]
"""
for layer in range(self.num_layers):
yield layer, (
(
direction,
tuple(arg[self.num_directions * layer + direction] for arg in args),
)
for direction in range(self.num_directions)
)
def initialize_cells(self):
cells = []
rename_map = {}
for layer, directions in self.iterate_layers():
for direction, _ in directions:
layer_input_size = (
self.input_size
if layer == 0
else self.hidden_size * self.num_directions
)
cell = self.cell_type(
layer_input_size,
self.hidden_size,
bias=self.bias,
**self.cell_params,
)
cells.append(cell)
suffix = "_reverse" if direction == 1 else ""
cell_name = f"l{layer}{suffix}"
setattr(self, cell_name, cell)
components = ["weight"] + ["bias" if self.bias else []]
matrices = ["ih", "hh"]
for c in components:
for m in matrices:
rename_map[f"{cell_name}.{m}.{c}"] = f"{c}_{m}_{cell_name}"
self.set_rename_map(rename_map)
return cells
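# Editor's note (illustrative, not in the original source): for layer 0, forward
# direction, the rename map built above contains entries such as
#   "l0.ih.weight" -> "weight_ih_l0"   and   "l0.hh.bias" -> "bias_hh_l0",
# which is what lets a trained DP module be exported with torch.nn-style
# parameter names (see the class docstrings below).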
class DPRNN(DPRNNBase):
"""Applies a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity to an
input sequence.
DP-friendly drop-in replacement of the ``torch.nn.RNN`` module.
Refer to ``torch.nn.RNN`` documentation for the model description, parameters and inputs/outputs.
After training this module can be exported and loaded by the original ``torch.nn`` implementation for inference.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
batch_first: bool = False,
dropout: float = 0,
bidirectional: bool = False,
proj_size: int = 0,
nonlinearity: str = "tanh",
) -> None:
super().__init__(
DPRNNCell,
input_size,
hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
proj_size=proj_size,
cell_params={"nonlinearity": nonlinearity},
)
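# Editor's usage sketch (illustrative; shapes follow the DPRNNBase.forward
# docstring above, this is not an official example):
#
# >>> rnn = DPRNN(input_size=8, hidden_size=16, num_layers=2, bidirectional=True)
# >>> x = torch.randn(5, 3, 8)            # [T, B, D]
# >>> out, h_n = rnn(x)
# >>> out.shape, h_n.shape
# (torch.Size([5, 3, 32]), torch.Size([4, 3, 16]))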
class DPGRU(DPRNNBase):
"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
DP-friendly drop-in replacement of the ``torch.nn.GRU`` module.
Refer to ``torch.nn.GRU`` documentation for the model description, parameters and inputs/outputs.
After training this module can be exported and loaded by the original ``torch.nn`` implementation for inference.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
batch_first: bool = False,
dropout: float = 0,
bidirectional: bool = False,
proj_size: int = 0,
) -> None:
super().__init__(
DPGRUCell,
input_size,
hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
proj_size=proj_size,
)
class DPLSTM(DPRNNBase):
"""Applies a multi-layer long short-term memory (LSTM) RNN to an input
sequence.
DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module.
Refer to ``torch.nn.LSTM`` documentation for the model description, parameters and inputs/outputs.
After training this module can be exported and loaded by the original ``torch.nn`` implementation for inference.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
batch_first: bool = False,
dropout: float = 0,
bidirectional: bool = False,
proj_size: int = 0,
) -> None:
super().__init__(
DPLSTMCell,
input_size,
hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
proj_size=proj_size,
)
| [
"[email protected]"
]
| |
13d8e018f655410c792838106349c05d7a3368a0 | e43331bd888313b6cf0a6e1c62767b75150f5223 | /archivrflickr/migrations/0001_initial.py | 4db46b9166fca848150fd872e562d5879816a667 | []
| no_license | philgyford/django-archivr | a10cf7a0752b88b490cbccfef4fc27ff69447db3 | e0611d76d15eabb5fbd959d46e4e33c211c91a33 | refs/heads/master | 2021-01-18T22:04:00.699789 | 2016-04-08T16:26:18 | 2016-04-08T16:26:18 | 4,202,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,763 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FlickrFavorite'
db.create_table('archivrflickr_flickrfavorite', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('photo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrPhoto'])),
('favorite_list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrFavoriteList'])),
('date_faved', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('archivrflickr', ['FlickrFavorite'])
# Adding model 'FlickrFavoriteList'
db.create_table('archivrflickr_flickrfavoritelist', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrUser'])),
('date_archived', self.gf('django.db.models.fields.DateTimeField')()),
('primary', self.gf('django.db.models.fields.related.ForeignKey')(related_name='primary_in', null=True, to=orm['archivrflickr.FlickrPhoto'])),
))
db.send_create_signal('archivrflickr', ['FlickrFavoriteList'])
# Adding model 'FlickrPhoto'
db.create_table('archivrflickr_flickrphoto', (
('archivritem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['archivr.ArchivrItem'], unique=True, primary_key=True)),
('flickr_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrUser'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('posted_date', self.gf('django.db.models.fields.DateTimeField')()),
('updated_date', self.gf('django.db.models.fields.DateTimeField')()),
('taken_date', self.gf('django.db.models.fields.DateTimeField')()),
('taken_granularity', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('comments', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('visibility_is_public', self.gf('django.db.models.fields.BooleanField')(default=False)),
('visibility_is_friend', self.gf('django.db.models.fields.BooleanField')(default=False)),
('visibility_is_family', self.gf('django.db.models.fields.BooleanField')(default=False)),
('photopage_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('farm', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('server', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('secret', self.gf('django.db.models.fields.CharField')(max_length=10)),
('original_secret', self.gf('django.db.models.fields.CharField')(max_length=10, blank=True)),
('original_format', self.gf('django.db.models.fields.CharField')(max_length=10, blank=True)),
('safety_level', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('rotation', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('license', self.gf('django.db.models.fields.CharField')(max_length=50)),
('large_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('large_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('largesquare_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('largesquare_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('medium640_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('medium640_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('medium800_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('medium800_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('medium_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('medium_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('original_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('original_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('small320_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('small320_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('small_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('small_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('square_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('square_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('thumbnail_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('thumbnail_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('is_video', self.gf('django.db.models.fields.BooleanField')(default=False)),
('video_duration', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
('video_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('video_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('geo_latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=6, blank=True)),
('geo_longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=6, blank=True)),
('geo_accuracy', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('geo_place_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('geo_woe_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('geo_county', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('geo_county_place_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('geo_county_woe_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('geo_country', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('geo_country_place_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('geo_country_woe_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('geo_locality', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('geo_locality_place_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('geo_locality_woe_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('geo_neighbourhood', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('geo_neighbourhood_place_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('geo_neighbourhood_woe_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('geo_region', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('geo_region_place_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('geo_region_woe_id', self.gf('django.db.models.fields.PositiveIntegerField')(max_length=50, null=True, blank=True)),
('geo_perms_is_public', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('geo_perms_is_contact', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('geo_perms_is_friend', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('geo_perms_is_family', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('exif_aperture', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_color_space', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_exposure', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_flash', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_focal_length', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_iso', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_make', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_metering_mode', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_model', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_orientation', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('exif_software', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('archivrflickr', ['FlickrPhoto'])
# Adding model 'FlickrPhotoComment'
db.create_table('archivrflickr_flickrphotocomment', (
('flickr_id', self.gf('django.db.models.fields.CharField')(max_length=128, primary_key=True)),
('photo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrPhoto'])),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrUser'])),
('pub_date', self.gf('django.db.models.fields.DateTimeField')()),
('permanent_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('comment', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('archivrflickr', ['FlickrPhotoComment'])
# Adding model 'FlickrPhotoset'
db.create_table('archivrflickr_flickrphotoset', (
('flickr_id', self.gf('django.db.models.fields.CharField')(max_length=50, primary_key=True)),
('primary', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='primary_photo_set', null=True, to=orm['archivrflickr.FlickrPhoto'])),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrUser'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
))
db.send_create_signal('archivrflickr', ['FlickrPhotoset'])
# Adding M2M table for field photos on 'FlickrPhotoset'
db.create_table('archivrflickr_flickrphotoset_photos', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('flickrphotoset', models.ForeignKey(orm['archivrflickr.flickrphotoset'], null=False)),
('flickrphoto', models.ForeignKey(orm['archivrflickr.flickrphoto'], null=False))
))
db.create_unique('archivrflickr_flickrphotoset_photos', ['flickrphotoset_id', 'flickrphoto_id'])
# Adding model 'FlickrPhotoTag'
db.create_table('archivrflickr_flickrphototag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='archivrflickr_flickrphototag_items', to=orm['taggit.Tag'])),
('flickr_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['archivrflickr.FlickrUser'])),
('machine_tag', self.gf('django.db.models.fields.BooleanField')(default=False)),
('content_object', self.gf('django.db.models.fields.related.ForeignKey')(related_name='archivrflickr_flickrphototag_items', to=orm['archivrflickr.FlickrPhoto'])),
))
db.send_create_signal('archivrflickr', ['FlickrPhotoTag'])
# Adding model 'FlickrUser'
db.create_table('archivrflickr_flickruser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nsid', self.gf('django.db.models.fields.CharField')(max_length=50)),
('username', self.gf('django.db.models.fields.CharField')(max_length=255)),
('realname', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('path_alias', self.gf('django.db.models.fields.CharField')(max_length=50)),
('location', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('photos_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('profile_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('mobile_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('icon_server', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('icon_farm', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
('is_pro', self.gf('django.db.models.fields.BooleanField')(default=False)),
('photos_first_date_taken', self.gf('django.db.models.fields.DateTimeField')()),
('photos_first_date', self.gf('django.db.models.fields.DateTimeField')()),
('photos_count', self.gf('django.db.models.fields.PositiveIntegerField')()),
('photos_views', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
))
db.send_create_signal('archivrflickr', ['FlickrUser'])
def backwards(self, orm):
# Deleting model 'FlickrFavorite'
db.delete_table('archivrflickr_flickrfavorite')
# Deleting model 'FlickrFavoriteList'
db.delete_table('archivrflickr_flickrfavoritelist')
# Deleting model 'FlickrPhoto'
db.delete_table('archivrflickr_flickrphoto')
# Deleting model 'FlickrPhotoComment'
db.delete_table('archivrflickr_flickrphotocomment')
# Deleting model 'FlickrPhotoset'
db.delete_table('archivrflickr_flickrphotoset')
# Removing M2M table for field photos on 'FlickrPhotoset'
db.delete_table('archivrflickr_flickrphotoset_photos')
# Deleting model 'FlickrPhotoTag'
db.delete_table('archivrflickr_flickrphototag')
# Deleting model 'FlickrUser'
db.delete_table('archivrflickr_flickruser')
models = {
'archivr.archivritem': {
'Meta': {'ordering': "('-order_date',)", 'object_name': 'ArchivrItem'},
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_genre': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'order_date': ('django.db.models.fields.DateTimeField', [], {})
},
'archivrflickr.flickrfavorite': {
'Meta': {'object_name': 'FlickrFavorite'},
'date_faved': ('django.db.models.fields.DateTimeField', [], {}),
'favorite_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrFavoriteList']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrPhoto']"})
},
'archivrflickr.flickrfavoritelist': {
'Meta': {'object_name': 'FlickrFavoriteList'},
'date_archived': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrUser']"}),
'photos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['archivrflickr.FlickrPhoto']", 'through': "orm['archivrflickr.FlickrFavorite']", 'symmetrical': 'False'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_in'", 'null': 'True', 'to': "orm['archivrflickr.FlickrPhoto']"})
},
'archivrflickr.flickrphoto': {
'Meta': {'ordering': "('-taken_date',)", 'object_name': 'FlickrPhoto', '_ormbases': ['archivr.ArchivrItem']},
'archivritem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['archivr.ArchivrItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'exif_aperture': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_color_space': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_exposure': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_flash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_focal_length': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_iso': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_make': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_metering_mode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_orientation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exif_software': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'farm': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'flickr_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'geo_accuracy': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'geo_country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_country_place_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_country_woe_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'geo_county': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_county_place_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_county_woe_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'geo_latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'geo_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_locality_place_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_locality_woe_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'geo_longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '6', 'blank': 'True'}),
'geo_neighbourhood': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_neighbourhood_place_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_neighbourhood_woe_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'geo_perms_is_contact': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'geo_perms_is_family': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'geo_perms_is_friend': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'geo_perms_is_public': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'geo_place_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_region': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_region_place_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_region_woe_id': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geo_woe_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'large_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'large_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'largesquare_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'largesquare_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'medium640_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'medium640_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'medium800_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'medium800_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'medium_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'medium_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'original_format': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'original_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'original_secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'original_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrUser']"}),
'photopage_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'posted_date': ('django.db.models.fields.DateTimeField', [], {}),
'rotation': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'safety_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'server': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'small320_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'small320_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'small_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'small_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'square_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'square_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'taken_date': ('django.db.models.fields.DateTimeField', [], {}),
'taken_granularity': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'thumbnail_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'thumbnail_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {}),
'video_duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'video_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'video_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'visibility_is_family': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visibility_is_friend': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visibility_is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'archivrflickr.flickrphotocomment': {
'Meta': {'ordering': "('pub_date',)", 'object_name': 'FlickrPhotoComment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrUser']"}),
'comment': ('django.db.models.fields.TextField', [], {}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'permanent_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrPhoto']"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {})
},
'archivrflickr.flickrphotoset': {
'Meta': {'ordering': "('order',)", 'object_name': 'FlickrPhotoset'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrUser']"}),
'photos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['archivrflickr.FlickrPhoto']", 'symmetrical': 'False'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'primary_photo_set'", 'null': 'True', 'to': "orm['archivrflickr.FlickrPhoto']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'archivrflickr.flickrphototag': {
'Meta': {'object_name': 'FlickrPhotoTag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['archivrflickr.FlickrUser']"}),
'content_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'archivrflickr_flickrphototag_items'", 'to': "orm['archivrflickr.FlickrPhoto']"}),
'flickr_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine_tag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'archivrflickr_flickrphototag_items'", 'to': "orm['taggit.Tag']"})
},
'archivrflickr.flickruser': {
'Meta': {'object_name': 'FlickrUser'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'icon_farm': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'icon_server': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_pro': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mobile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'nsid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'path_alias': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'photos_count': ('django.db.models.fields.PositiveIntegerField', [], {}),
'photos_first_date': ('django.db.models.fields.DateTimeField', [], {}),
'photos_first_date_taken': ('django.db.models.fields.DateTimeField', [], {}),
'photos_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photos_views': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'realname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['archivrflickr'] | [
"[email protected]"
]
| |
911a983b38870d5b30029913df017ccfc099817a | 549d8be84d27a1d6890c8539a519e58bd355351d | /examples/Serverless_Api_Backend.py | a0a2afbb3626a21ea3f17b0f3d8c9aa196248301 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | anoora17/troposphere | 1dabd3b4da89c593444c1564ef13fdae6e61acff | 47db869b2875b9517df5fdd90d5e15505a555b09 | refs/heads/master | 2020-03-17T23:32:55.048454 | 2018-05-17T17:24:39 | 2018-05-17T17:24:39 | 134,050,719 | 1 | 0 | BSD-2-Clause | 2018-05-19T10:05:51 | 2018-05-19T10:05:51 | null | UTF-8 | Python | false | false | 2,212 | py | # Converted from api_backend located at:
# https://github.com/awslabs/serverless-application-model/blob/dbc54b5d0cd31bf5cebd16d765b74aee9eb34641/examples/2016-10-31/api_backend/template.yaml
from troposphere import Template, Ref
from troposphere.awslambda import Environment
from troposphere.serverless import Function, ApiEvent, SimpleTable
t = Template()
t.add_description(
"Simple CRUD webservice. State is stored in a SimpleTable (DynamoDB) "
"resource.")
t.add_transform('AWS::Serverless-2016-10-31')
simple_table = t.add_resource(
SimpleTable("Table")
)
t.add_resource(
Function(
"GetFunction",
Handler='index.get',
Runtime='nodejs4.3',
CodeUri='s3://<bucket>/api_backend.zip',
Policies='AmazonDynamoDBReadOnlyAccess',
Environment=Environment(
Variables={
'TABLE_NAME': Ref(simple_table)
}
),
Events={
'GetResource': ApiEvent(
'GetResource',
Path='/resource/{resourceId}',
Method='get'
)
}
)
)
t.add_resource(
Function(
"PutFunction",
Handler='index.put',
Runtime='nodejs4.3',
CodeUri='s3://<bucket>/api_backend.zip',
Policies='AmazonDynamoDBReadOnlyAccess',
Environment=Environment(
Variables={
'TABLE_NAME': Ref(simple_table)
}
),
Events={
'PutResource': ApiEvent(
'PutResource',
Path='/resource/{resourceId}',
Method='put'
)
}
)
)
t.add_resource(
Function(
"DeleteFunction",
Handler='index.delete',
Runtime='nodejs4.3',
CodeUri='s3://<bucket>/api_backend.zip',
Policies='AmazonDynamoDBReadOnlyAccess',
Environment=Environment(
Variables={
'TABLE_NAME': Ref(simple_table)
}
),
Events={
'DeleteResource': ApiEvent(
'DeleteResource',
Path='/resource/{resourceId}',
Method='delete'
)
}
)
)
print(t.to_json())
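# Editor's note (illustrative, not part of the original example; the output
# filename below is hypothetical): instead of printing, the generated template
# could be written to disk for deployment, e.g.
#
# with open("api_backend.template.json", "w") as f:
#     f.write(t.to_json())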
| [
"[email protected]"
]
| |
6a5f7fe7878d43df06565b94f5e3b304e23823d5 | f57bba82fed27b74dca1319f41ed3cf9047fcc55 | /0x01-python-if_else_loops_functions/2-print_alphabet.py | 561690b60f55f71d5a836ab40614f35d9d817346 | []
| no_license | ibeckermayer/holbertonschool-higher_level_programming | cb76d64d698e7b2c8e60d8498c25ba31dc7e337f | 3cb19fb206a77cccbf12a2c2e06067fa1be303f1 | refs/heads/master | 2020-03-09T13:36:27.959941 | 2018-09-06T00:57:53 | 2018-09-06T00:57:53 | 128,815,447 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #!/usr/bin/python3
for c in range(ord('a'), ord('z')+1):
print("{:c}".format(c), end='')
| [
"[email protected]"
]
| |
b146a8d58b6c44b8b91c6e10e0eee5d3ae1c1e03 | 365967082720f3fda31afccfc237b7a67e8ffc07 | /sorting_searching/peak.py | dd2d896b427e16191838c3197c5819483f3b6557 | []
| no_license | hulaba/geekInsideYou | ec68dee3fa24d63f5470aa40b600ef34d37c5da1 | 72c1f1b4fbf115db91c908a68c9ac3ca4cb22a4f | refs/heads/master | 2022-12-11T11:11:03.149336 | 2020-09-12T16:12:40 | 2020-09-12T16:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | # your task is to complete this function
# function should return index to the any valid peak element
def peakElement(arr, n):
# Code here
if n == 1:
return 0
for i in range(n):
# if element at first index is greater than next
if i == 0 and arr[1] < arr[0]:
return 0
# if element is at last index and it is greater than
# its prev one
elif i == n - 1 and arr[n - 2] < arr[n - 1]:
return n - 1
# case, when element is at any other index
# then you need to check both of its neighbour
elif arr[i - 1] < arr[i] and arr[i] > arr[i + 1]:
return i
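# Editor's sketch (illustrative alternative, not part of the submitted solution):
# the same problem can be solved in O(log n) with a binary search that always
# moves toward a rising neighbour.
def peak_element_binary(arr, n):
    lo, hi = 0, n - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] < arr[mid + 1]:
            lo = mid + 1  # a peak must exist to the right of mid
        else:
            hi = mid      # arr[mid] >= arr[mid + 1]: peak at mid or to the left
    return lo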
# {
# Driver Code Starts
if __name__ == '__main__':
t = int(input())
for i in range(t):
n = int(input())
arr = list(map(int, input().strip().split()))
index = peakElement(arr, n)
flag = False
if index == 0 and n == 1:
flag = True
elif index == 0 and arr[index] >= arr[index + 1]:
flag = True
elif index == n - 1 and arr[index] >= arr[index - 1]:
flag = True
elif arr[index - 1] <= arr[index] and arr[index] >= arr[index + 1]:
flag = True
else:
flag = False
if flag:
print(1)
else:
print(0)
# } Driver Code Ends
| [
"[email protected]"
]
| |
5cad52e17f840954f11e4f3480533211c904549e | 956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1 | /monk/system_unit_tests/pytorch/test_activation_softmin.py | cde9b1d95f3da0fc6c01dd83ce0386fe8fc78a97 | [
"Apache-2.0"
]
| permissive | Aanisha/monk_v1 | c24279b2b461df9b3de2984bae0e2583aba48143 | c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72 | refs/heads/master | 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 | Apache-2.0 | 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null | UTF-8 | Python | false | false | 1,348 | py | import os
import sys
sys.path.append("../../../../monk_v1/");
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_activation_softmin(system_dict):
forward = True;
test = "test_activation_softmin";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.softmin());
gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False);
x = torch.randn(1, 3, 64, 64);
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| [
"[email protected]"
]
| |
45d620d1e60cd162a992f66503976015885c17a8 | 60cbdf1f9771159f872e632017fa736800784297 | /Leetcode/Check-if-the-Sentence-is-Pangram.py | fe3da8ce862e017efe6b6dd38769acb3b97e5a82 | []
| no_license | AG-Systems/programming-problems | 6ea8c109f04c4d22db6e63fe7b665894c786242a | 39b2d3546d62b48388788e36316224e15a52d656 | refs/heads/master | 2023-04-16T16:59:20.595993 | 2023-04-05T01:25:23 | 2023-04-05T01:25:23 | 77,095,208 | 10 | 3 | null | 2019-10-14T16:16:18 | 2016-12-22T00:03:14 | Python | UTF-8 | Python | false | false | 332 | py | class Solution:
def checkIfPangram(self, sentence: str) -> bool:
letter_counter = {}
for letter in sentence:
if letter in letter_counter:
letter_counter[letter] += 1
else:
letter_counter[letter] = 1
return len(letter_counter.keys()) == 26
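# Editor's note (illustrative alternative giving the same result, since the
# problem guarantees lowercase English letters only):
#     return len(set(sentence)) == 26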
| [
"[email protected]"
]
| |
0ae5585fb9e152b45e4cc381b1aea2c6b8c650fe | 18b250fe572223ade49c2cf995e0aad0613abc6a | /scripts/artifacts/vlcThumbs.py | 5a1156c12cdf50a0855a63068213cc7f249375c2 | [
"MIT"
]
| permissive | ydkhatri/ALEAPP | e79e558005bf92519e45b17be99ad13aabf4f25e | 4f2a739d6accd832176cac8db72cded07fb17633 | refs/heads/master | 2022-08-19T07:14:59.669286 | 2022-07-26T03:09:16 | 2022-07-26T03:09:16 | 242,858,450 | 0 | 0 | MIT | 2021-03-19T16:09:59 | 2020-02-24T22:33:34 | JavaScript | UTF-8 | Python | false | false | 1,337 | py | import os
import shutil
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import timeline, tsv, is_platform_windows, open_sqlite_db_readonly
def get_vlcThumbs(files_found, report_folder, seeker, wrap_text):
data_list = []
for file_found in files_found:
file_found = str(file_found)
data_file_real_path = file_found
shutil.copy2(data_file_real_path, report_folder)
data_file_name = os.path.basename(data_file_real_path)
thumb = f'<img src="{report_folder}/{data_file_name}"></img>'
data_list.append((data_file_name, thumb))
path_to_files = os.path.dirname(data_file_real_path)
description = 'VLC Thumbnails'
report = ArtifactHtmlReport('VLC Thumbnails')
report.start_artifact_report(report_folder, 'VLC Thumbnails', description)
report.add_script()
data_headers = ('Filename', 'Thumbnail' )
report.write_artifact_data_table(data_headers, data_list, path_to_files, html_escape=False)
report.end_artifact_report()
tsvname = 'VLC Thumbnails'
tsv(report_folder, data_headers, data_list, tsvname)
__artifacts__ = {
"VLC Thumbs": (
"VLC",
('*/org.videolan.vlc/files/medialib/*.jpg'),
get_vlcThumbs)
}
| [
"[email protected]"
]
| |
9af56f4a07be6988eb257460a4bda61c2d12b231 | abf3ea33a5fa7457d1cd735310700df9c784d1ae | /CST100/Chapter_4/Chapter_4/Ch_4_Solutions/Ch_4_Projects/4.11/testnode.py | fbe1aafaeffff3f7a79626078998ce6c7db6794c | []
| no_license | hieugomeister/ASU | 57b8a2f604a27ce339675f40d3b042ccf57efb86 | 3e9254cebeaeb1c57ae912d6e5a02af7531128e8 | refs/heads/master | 2020-12-30T16:59:17.801581 | 2017-05-12T22:44:44 | 2017-05-12T22:44:44 | 91,046,525 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | """
File: testnode.py
Project 4.11
Add a makeTwoWay function.
Tests the Node class.
"""
from node import Node, TwoWayNode
def length(head):
"""Returns the number of items in the linked structure
referred to by head."""
probe = head
count = 0
while probe != None:
count += 1
probe = probe.next
return count
def insert(index, newItem, head):
"""Inserts newItem at position is the linked structure
referred to by head. Returns a reference to the new
structure."""
if index <= 0:
# newItem goes at the head
head = Node(newItem, head)
else:
# Search for node at position index - 1 or the last position
probe = head
while index > 1 and probe.next != None:
probe = probe.next;
index -= 1
# Insert new node after node at position index - 1
# or last position
probe.next = Node(newItem, probe.next)
return head
def pop(index, head):
"""Removes the item at index from the linked structure
referred to by head and returns the tuple (head, item)
Precondition: 0 <= index < length(head)"""
if index < 0 or index >= length(head):
        raise IndexError("Index out of bounds")
# Assumes that the linked structure has at least one item
if index == 0:
removedItem = head.data
head = head.next
else:
# Search for node at position index - 1 or
# the next to last position
probe = head
while index > 1 and probe.next.next != None:
probe = probe.next
index -= 1
removedItem = probe.next.data
probe.next = probe.next.next
return (head, removedItem)
def makeTwoWay(head):
"""Creates and returns a doubly linked structure that
contains the items in the structure referred to by head."""
if head is None:
# Empty structure
return None
else:
# Set the first node
twoWayHead = TwoWayNode(head.data)
twoWayProbe = twoWayHead
probe = head
# Set remaining nodes, if any
while probe.next != None:
newNode = TwoWayNode(probe.next.data, twoWayProbe)
twoWayProbe.next = newNode
twoWayProbe = newNode
probe = probe.next
return twoWayHead
def printStructure(head):
"""Prints the items in the structure referred to by head."""
probe = head
while probe != None:
print(probe.data, end = " ")
probe = probe.next
print()
def main():
"""Tests modifications."""
head = None
# Add five nodes to the beginning of the linked structure
for count in range(1, 6):
head = Node(count, head)
print("5 4 3 2 1:", end = " ")
printStructure(head)
print("5 4 3 2 1:", end = " ")
twoWayHead = makeTwoWay(head)
printStructure(twoWayHead)
if __name__ == "__main__": main()
| [
"[email protected]"
]
| |
eff6f26588057c7a06e5c736ddbd4beb44480094 | 55f945f29f78c0c0c6ac110df808126a38999be5 | /devel/lib/python2.7/dist-packages/mav_msgs/msg/_TorqueThrust.py | 08a7b4e8e0d04bccf8c9630a834e84f91c8c4b07 | []
| no_license | aarchilla/NodeROS | 43e9f0d6931d1eb11057d229e20e2911fba943c2 | 4d79e3ffbbb19c11535613249fed2191ada63000 | refs/heads/master | 2020-06-16T20:00:39.218889 | 2019-07-07T18:36:17 | 2019-07-07T18:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | /home/esaii-admin/catkin_ws/devel/.private/mav_msgs/lib/python2.7/dist-packages/mav_msgs/msg/_TorqueThrust.py | [
"[email protected]"
]
| |
c3403fa8e1e383b59e7d439c6a8cb4257c367515 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4466/codes/1734_2506.py | 7e3f3e2a8fa3a6b63bd10bd541c82d212da44969 | []
| no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | q_inicial = int(input("quantidade inicial: "))
perc = float(input("percentual de crescimento: "))
quant = int(input("quantidade de pirarucus retirados: "))
perc = perc/100
t = 0
while(0 <= q_inicial <= 12000):
q_inicial = (q_inicial + q_inicial * perc) - quant
t = t + 1
if(q_inicial <= 0):
print("EXTINCAO")
print(t)
if(q_inicial >= 12000):
print("LIMITE")
print(t) | [
"[email protected]"
]
| |
eb93813e0136a34f5b51222dd6b5c3141c7b1d1c | eb280992ab7c39173f6f19d28ddf7efd8a29775a | /calaccess_processed_elections/apps.py | b93b394463300e21fac2cb0fa5fcc3069b0c68f6 | [
"MIT"
]
| permissive | ryanvmenezes/django-calaccess-processed-data | f5e99a8bdaf7c6555e357d3dabfd673fd12b8419 | 966635c8438cda440a12f7765af7c79b5bcb3995 | refs/heads/master | 2020-04-14T22:41:49.520588 | 2018-10-10T12:07:57 | 2018-10-10T12:07:57 | 99,171,493 | 0 | 0 | null | 2017-08-03T00:02:03 | 2017-08-03T00:02:03 | null | UTF-8 | Python | false | false | 3,720 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Basic configuration for the application.
"""
from __future__ import unicode_literals, absolute_import
import os
import collections
from django.apps import apps
from django.apps import AppConfig
class CalAccessProcessedElectionsConfig(AppConfig):
"""
Application configuration.
"""
name = 'calaccess_processed_elections'
verbose_name = "CAL-ACCESS processed data: Elections"
# Where SQL files are stored in this application
sql_directory_path = os.path.join(os.path.dirname(__file__), 'sql')
def get_ocd_models_list(self):
"""
Returns a list of all the OCD models proxied by this app.
"""
return list(self.get_ocd_models_map().keys())
def get_ocd_proxy_lookup(self):
"""
Returns a dictionary with the names of data models mapped to proxies.
"""
# Convert the keys to strings
return dict((k.__name__, v) for k, v in self.get_ocd_models_map().items())
def get_ocd_models_map(self):
"""
Returns a list of the models that should be saved in our archive.
"""
from . import proxies
ocd_core = apps.get_app_config('core')
ocd_elections = apps.get_app_config('elections')
# Create a dict mapping the models to proxies
return collections.OrderedDict({
ocd_core.get_model('Division'): proxies.OCDDivisionProxy,
ocd_core.get_model('Organization'): proxies.OCDOrganizationProxy,
ocd_core.get_model('OrganizationIdentifier'): proxies.OCDOrganizationIdentifierProxy,
ocd_core.get_model('OrganizationName'): proxies.OCDOrganizationNameProxy,
ocd_core.get_model('Jurisdiction'): proxies.OCDJurisdictionProxy,
ocd_core.get_model('Post'): proxies.OCDPostProxy,
ocd_core.get_model('Person'): proxies.OCDPersonProxy,
ocd_core.get_model('PersonIdentifier'): proxies.OCDPersonIdentifierProxy,
ocd_core.get_model('PersonName'): proxies.OCDPersonNameProxy,
ocd_core.get_model('Membership'): proxies.OCDMembershipProxy,
ocd_elections.get_model('Election'): proxies.OCDElectionProxy,
ocd_elections.get_model('ElectionIdentifier'): proxies.OCDElectionIdentifierProxy,
ocd_elections.get_model('ElectionSource'): proxies.OCDElectionSourceProxy,
ocd_elections.get_model('Candidacy'): proxies.OCDCandidacyProxy,
ocd_elections.get_model('CandidacySource'): proxies.OCDCandidacySourceProxy,
ocd_elections.get_model('BallotMeasureContest'): proxies.OCDBallotMeasureContestProxy,
ocd_elections.get_model('BallotMeasureContestOption'): proxies.OCDBallotMeasureContestOptionProxy,
ocd_elections.get_model('BallotMeasureContestIdentifier'): proxies.OCDBallotMeasureContestIdentifierProxy,
ocd_elections.get_model('BallotMeasureContestSource'): proxies.OCDBallotMeasureContestSourceProxy,
ocd_elections.get_model('RetentionContest'): proxies.OCDRetentionContestProxy,
ocd_elections.get_model('RetentionContestOption'): proxies.OCDRetentionContestOptionProxy,
ocd_elections.get_model('RetentionContestIdentifier'): proxies.OCDRetentionContestIdentifierProxy,
ocd_elections.get_model('RetentionContestSource'): proxies.OCDRetentionContestSourceProxy,
ocd_elections.get_model('CandidateContest'): proxies.OCDCandidateContestProxy,
ocd_elections.get_model('CandidateContestPost'): proxies.OCDCandidateContestPostProxy,
ocd_elections.get_model('CandidateContestSource'): proxies.OCDCandidateContestSourceProxy
})
| [
"[email protected]"
]
| |
e9a917a37d1b2602f0fecc3ba700c45e5ffdb762 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/thirtyPercent/rank_2omz_A.py | 598632c5f22bcd1066085853b28e1def339718fd | []
| no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2omz.csv'
identifier = 'A'
coefFrac = 0.3
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/thirtyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/thirtyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
]
| |
4f54753f579ffb5420f90b1d8b8a3f5e105c7783 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Algorithms/eppstein/PartitionRefinement.py | 25028f7afc76ad44dc47c9bfdac0723cd00e2448 | [
"MIT"
]
| permissive | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | """PartitionRefinement.py
Maintain and refine a partition of a set of items into subsets,
as used e.g. in Hopcroft's DFA minimization algorithm,
modular decomposition of graphs, etc.
D. Eppstein, November 2003.
"""
class PartitionError(Exception): pass
class PartitionRefinement:
"""Maintain and refine a partition of a set of items into subsets.
Space usage for a partition of n items is O(n), and each refine
operation takes time proportional to the size of its argument.
"""
def __init__(self,items):
"""Create a new partition refinement data structure for the given
items. Initially, all items belong to the same subset.
"""
S = set(items)
self._sets = {id(S):S}
self._partition = {x:S for x in S}
def __getitem__(self,element):
"""Return the set that contains the given element."""
return self._partition[element]
def __iter__(self):
"""Loop through the sets in the partition."""
try: # Python 2/3 compatibility
return self._sets.itervalues()
except AttributeError:
return iter(self._sets.values())
def __len__(self):
"""Return the number of sets in the partition."""
return len(self._sets)
def add(self,element,theset):
"""Add a new element to the given partition subset."""
if id(theset) not in self._sets:
raise PartitionError("Set does not belong to the partition")
if element in self._partition:
raise PartitionError("Element already belongs to the partition")
theset.add(element)
self._partition[element] = theset
def remove(self,element):
"""Remove the given element from its partition subset."""
self._partition[element].remove(element)
del self._partition[element]
def refine(self,S):
"""Refine each set A in the partition to the two sets
A & S, A - S. Return a list of pairs (A & S, A - S)
for each changed set. Within each pair, A & S will be
a newly created set, while A - S will be a modified
version of an existing set in the partition.
Not a generator because we need to perform the partition
even if the caller doesn't iterate through the results.
"""
hit = {}
output = []
for x in S:
if x in self._partition:
Ax = self._partition[x]
hit.setdefault(id(Ax),set()).add(x)
for A,AS in hit.items():
A = self._sets[A]
if AS != A:
self._sets[id(AS)] = AS
for x in AS:
self._partition[x] = AS
A -= AS
output.append((AS,A))
return output
def freeze(self):
"""Make all sets in S immutable."""
for S in list(self._sets.values()):
F = frozenset(S)
for x in F:
self._partition[x] = F
self._sets[id(F)] = F
del self._sets[id(S)]
S = {1,4,9,16}
A = PartitionRefinement(S)
print(A.refine(S))
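# A second, hypothetical example (not in the original file): refining a six-element
# partition with {2, 4, 6} splits the single starting set into evens and odds, and the
# returned pairs are (A & S, A - S) for every set A that actually changed.
P = PartitionRefinement({1, 2, 3, 4, 5, 6})
print(P.refine({2, 4, 6}))   # [({2, 4, 6}, {1, 3, 5})]
print(len(P))                # 2 sets now in the partition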
| [
"[email protected]"
]
| |
3f9d7d0aaff42ecd58b1353b226c30457aefb554 | 2fba0a631bb70aaae6dc89bff09f13e728934605 | /privacy/migrations/0022_auto_20200527_0909.py | 2f9d37c178d9929cd0adc472a56bc0457b5f6116 | []
| no_license | murengera/eshoping-api | 4c5bcbeb7ac3ef12858e08f8a88d4f7b710b5c64 | 90acb0f8db519a38a1bd0976bd1f704f6d02f2dd | refs/heads/master | 2022-12-25T10:19:39.431427 | 2020-09-26T12:35:38 | 2020-09-26T12:35:38 | 286,399,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # Generated by Django 3.0 on 2020-05-27 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('privacy', '0021_auto_20200527_0908'),
]
operations = [
migrations.AlterField(
model_name='privacypoliciesandtermsofuse',
name='_type',
field=models.CharField(choices=[('terms_of_use', 'terms_of_use'), ('privacy_policy', 'privacy_policy')], max_length=50),
),
migrations.AlterField(
model_name='privacypoliciesandtermsofuse',
name='language',
field=models.CharField(choices=[('english', 'english'), ('rwandese', 'rwandese')], max_length=30),
),
]
| [
"[email protected]"
]
| |
5b89414e459547981f97861a6da0ef73ea51b958 | 2db1a0038d26ccb6adc572b536cb5cd401fd7498 | /lib/python2.7/site-packages/pip/commands/check.py | 95c64fc66c74741bc3e23fd86868dac809cb4f94 | []
| no_license | syurk/labpin | e795c557e7d7bcd4ff449cb9a3de32959a8c4968 | 04070dd5ce6c0a32c9ed03765f4f2e39039db411 | refs/heads/master | 2022-12-12T02:23:54.975797 | 2018-11-29T16:03:26 | 2018-11-29T16:03:26 | 159,692,630 | 0 | 1 | null | 2022-11-19T12:15:55 | 2018-11-29T16:04:20 | Python | UTF-8 | Python | false | false | 1,381 | py | import logging
from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions
logger = logging.getLogger(__name__)
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
name = 'check'
usage = """
%prog [options]"""
summary = 'Verify installed packages have compatible dependencies.'
def run(self, options, args):
dists = get_installed_distributions(local_only=False, skip=())
missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)
for dist in dists:
key = '%s==%s' % (dist.project_name, dist.version)
for requirement in missing_reqs_dict.get(key, []):
logger.info(
"%s %s requires %s, which is not installed.",
dist.project_name, dist.version, requirement.project_name)
for requirement, actual in incompatible_reqs_dict.get(key, []):
logger.info(
"%s %s has requirement %s, but you have %s %s.",
dist.project_name, dist.version, requirement,
actual.project_name, actual.version)
if missing_reqs_dict or incompatible_reqs_dict:
return 1
else:
logger.info("No broken requirements found.")
| [
"[email protected]"
]
| |
c59b76f55ddc99b1693010dc6662d175c45b7f65 | 69e41359e2f01ffb12e243970a59e6fcc986e09a | /solved/Euler56.py | 87493072ac091de2dbfdf3fae52aa0ea07e77c2d | []
| no_license | pfhayes/euler | 0d4383f9cfa36890bdaf95bfdae553707c6cbc86 | 56f053afffb91262c7c48463700cab4fe6581813 | refs/heads/master | 2016-09-05T13:18:46.089574 | 2011-12-21T05:26:41 | 2011-12-21T05:26:41 | 1,786,274 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # Find the maximum possible sum of digits for a^b, with a,b < 100
from useful import digits
maxA, maxB, maxSum = 0,0,0
for a in range (100) :
for b in range(100) :
s = sum(digits(a**b))
maxSum = max([s,maxSum])
if s == maxSum :
maxA = a
maxB = b
print maxSum, maxA, maxB | [
"[email protected]"
]
| |
49445015f0ed16f52b4534b346d9f4cc8f0baa8b | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_read_1/client-vpn-connection_list.py | 0a5a1dfa2928044398a0fafaa19dbe1a6072d131 | []
| no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-client-vpn-connections.html
if __name__ == '__main__':
"""
terminate-client-vpn-connections : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/terminate-client-vpn-connections.html
"""
parameter_display_string = """
# client-vpn-endpoint-id : The ID of the Client VPN endpoint.
"""
add_option_dict = {}
#######################################################################
# setting option use
# ex: add_option_dict["setting_matching_parameter"] = "--owners"
# ex: add_option_dict["setting_key"] = "owner_id"
#######################################################################
# single parameter
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
execute_one_parameter("ec2", "describe-client-vpn-connections", "client-vpn-endpoint-id", add_option_dict) | [
"[email protected]"
]
| |
e12543041d44d3cb9be84a8134ebde85793d5476 | 1f79d9d02810a944c45fc962c62159035c5a2247 | /migrations/versions/2ce138017f09_.py | 44945f5b6e86b3a8d3d753b01cce2d62c3c70333 | []
| no_license | qsq-dm/mff | 5f17d6ffd1d4742dc46d1367cff35233af08a450 | d7f1e6f3fba95fe0d8ebb8937dda64a17e71f048 | refs/heads/master | 2020-12-29T02:19:29.037394 | 2016-08-01T15:40:42 | 2016-08-01T15:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | """empty message
Revision ID: 2ce138017f09
Revises: 38dd6746c99b
Create Date: 2015-12-10 19:14:00.636524
"""
# revision identifiers, used by Alembic.
revision = '2ce138017f09'
down_revision = '38dd6746c99b'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user_coupon', sa.Column('is_trial', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user_coupon', 'is_trial')
### end Alembic commands ###
| [
"root@localhost"
]
| root@localhost |
98dc97fd83c006e87c1140e8bd0d5d01343a1be4 | 289e6f9cf1d37fffb45810144e1a15f0de5c19d5 | /chiro/import_chiro.py | 0c60ee96d0de34304dd138784cf52fae28a9e7a1 | [
"MIT",
"CC0-1.0"
]
| permissive | chemical-roles/chemical-roles | 4eb912d6cc767f465e0e35e34d0c803a96e4d4f3 | 78801264a94a8b2b43ff553020483dd2ef9af993 | refs/heads/master | 2023-04-11T14:40:53.846885 | 2022-09-02T11:56:06 | 2022-09-02T11:56:06 | 199,155,107 | 6 | 5 | MIT | 2021-08-04T09:14:34 | 2019-07-27T11:17:57 | Python | UTF-8 | Python | false | false | 1,492 | py | import logging
from pyobo import get_id_name_mapping, get_obo_graph
RELATIONSHIPS = [
"activator_of",
"agonist_of",
"antagonist_of",
"destabilizer_of",
"inducer_of",
"inhibitor_of",
"modulator_of",
"sensitizer_of",
"stabilizier_of",
]
MAPPING_PREFIXES = ["ncbitaxon", "go", "pr", "hp", "mp"]
def main():
graph = get_obo_graph("chiro")
chebi_mapping = get_id_name_mapping("chebi")
mappings = {prefix: get_id_name_mapping(prefix) for prefix in MAPPING_PREFIXES}
triples = []
for h, data in graph.nodes(data=True):
if not data:
continue
r, t = data["relationship"][0].split()
r = r[: -len("_of")]
h_name = chebi_mapping.get(h)
if h_name is None:
print(f"Could not find name for chemical {h}")
continue
t_namespace = t.split(":")[0].lower()
t_mapping = mappings[t_namespace]
t_name = t_mapping.get(t)
if t_name is None:
print(f"Could not find name for target {t}")
continue
triples.append(("chebi", h, h_name, r, t_namespace, t, t_name))
with open("chiro_import.tsv", "w") as file:
print(
"source_db source_id source_name modulation type target_db target_id target_name",
file=file,
)
for t in sorted(triples):
print(*t, sep="\t", file=file)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| [
"[email protected]"
]
| |
c79714327ccf731a9a7f8568306169ba46c9dba8 | 84f2cdc80da796b38433e88d9145cbd797e85f42 | /flaws/asttools.py | c4a8cb3502876a4d90fce1e613bde8734d777a52 | [
"BSD-2-Clause"
]
| permissive | EricSchles/flaws | 3be808d37fa1bfd050fa8e0ec3791ab7ee1e5365 | a6de9c2c2a89f79bd67a20535cea6a9ca677f357 | refs/heads/master | 2021-01-17T08:05:27.603218 | 2014-08-23T08:07:52 | 2014-08-23T08:07:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | import ast
def is_write(node):
return isinstance(node, (ast.Import, ast.ImportFrom,
ast.FunctionDef, ast.ClassDef, ast.arguments)) \
or isinstance(node.ctx, (ast.Store, ast.Del, ast.Param))
def is_use(node):
return isinstance(node, ast.Name) \
and isinstance(node.ctx, (ast.Load, ast.Del))
def is_constant(node):
return isinstance(node, ast.Name) and node.id.isupper()
def ast_eval(node):
if isinstance(node, ast.List):
return map(ast_eval, node.elts)
elif isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Num):
return node.n
else:
raise ValueError("Don't know how to eval %s" % node.__class__.__name__)
def name_class(node):
if isinstance(node, (ast.Import, ast.ImportFrom)):
return 'import'
elif isinstance(node, ast.FunctionDef):
return 'function'
elif isinstance(node, ast.ClassDef):
return 'class'
elif isinstance(node, ast.Name) and isinstance(node.ctx, ast.Param) \
or isinstance(node, ast.arguments):
return 'param'
else:
return 'variable'
def node_str(node):
return '%s at %d:%d' % (name_class(node), node.lineno, node.col_offset)
def nodes_str(nodes):
return '[%s]' % ', '.join(map(node_str, nodes))
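# Hypothetical usage sketch (not part of the original module); it assumes a Python
# version where the legacy ast classes used above (ast.Str, ast.Num, ast.Param) are
# still available. It classifies every Name node found in a tiny snippet.
if __name__ == '__main__':
    tree = ast.parse("LIMIT = 10\ndef check(value):\n    return value < LIMIT\n")
    for node in ast.walk(tree):
        if isinstance(node, ast.Name):
            kind = 'write' if is_write(node) else 'use' if is_use(node) else 'other'
            print(node_str(node), kind, 'constant' if is_constant(node) else '')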
| [
"[email protected]"
]
| |
cbfc0f372350492bb4d3e472bf7a52dee56b078a | f3188f1f9da38f995bd65a423b2cc1cd1c31c55f | /PythonLeetcode/BinarySearch/easy/744. 寻找比目标字母大的最小字母.py | e5560199093099a93e75b3f4098cec0ae680a596 | [
"MIT"
]
| permissive | Lcoderfit/Introduction-to-algotithms | 34be05019870b6d4d967b0112e7953829448cdb0 | aea2630be6ca2c60186593d6e66b0a59e56dc848 | refs/heads/master | 2023-05-11T01:01:09.222149 | 2021-10-13T03:16:40 | 2021-10-13T03:16:40 | 146,017,809 | 3 | 1 | MIT | 2023-05-05T02:22:34 | 2018-08-24T16:56:13 | Go | UTF-8 | Python | false | false | 1,829 | py | """
Method 1: binary search
Time complexity: O(log n)
Space complexity: O(1)
Method 2: linear scan
Time complexity: O(n)
Space complexity: O(1)
In the cases below, the first line is the target letter and the second line is the sorted letters array.
case1:
a
c f j
case 2:
c
c f j
case 3:
d
c f j
case 4:
g
c f j
case 5:
j
c f j
case 6:
k
c f j
"""
import sys
from typing import List
class Solution:
@staticmethod
def next_greatest_letter(letters: List[str], target: str) -> str:
i, j = 0, len(letters) - 1
        # Essentially a left-boundary binary search: we want the smallest letter strictly
        # greater than the target, and in an ascending array that "smallest" hit is the left
        # boundary, so mid is biased left and the right pointer moves down to mid.
while i < j:
mid = (i + j) // 2
if letters[mid] <= target:
i = mid + 1
else:
j = mid
if (i == len(letters) - 1) and (letters[i] > target):
return letters[-1]
if (i == len(letters) - 1) and (letters[i] <= target):
return letters[0]
return letters[i]
@staticmethod
def next_greatest_letter1(letters: List[str], target: str) -> str:
i, j = 0, len(letters) - 1
while i <= j:
mid = (i + j) // 2
if letters[mid] <= target:
i = mid + 1
else:
j = mid - 1
if i == len(letters):
return letters[0]
return letters[i]
@staticmethod
def next_greatest_letter2(letters: List[str], target: str) -> str:
for c in letters:
if c > target:
return c
return letters[0]
if __name__ == '__main__':
s = Solution()
for line in sys.stdin:
target_cur = line.strip()
letters_cur = [i for i in input().split(" ")]
res = s.next_greatest_letter(letters_cur, target_cur)
print(res)
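# Hypothetical quick check (not part of the original file):
# Solution.next_greatest_letter(["c", "f", "j"], "a") returns "c"; with target "j"
# the answer wraps around past the end of the array, so it is "c" as well.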
| [
"[email protected]"
]
| |
5eb112988098db6980600c2ca4c2ab2b15e030fc | 11705b5971757122772cc420912b509b1f39255c | /web/service/github/api/v3/repositories/Repositories.py | bc527209990819a483f3582a0a1b6414ed875d15 | [
"CC0-1.0",
"Unlicense",
"Apache-2.0",
"MIT"
]
| permissive | ytyaru/GitHub.Upload.Delete.CommentAndFile.201703281815 | 4bff9cba1e6bb2bec596d1190eb653169a01c839 | ce4d6c3830bff9d9c152d1d6224ad317f46ea778 | refs/heads/master | 2021-01-20T08:54:00.762565 | 2017-05-03T22:37:38 | 2017-05-03T22:37:38 | 90,199,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,598 | py | #!python3
#encoding
import requests
import urllib.parse
import json
import web.http.Response
class Repositories:
def __init__(self, data, reqp, response):
self.data = data
self.reqp = reqp
self.response = response
def create(self, name, description=None, homepage=None):
method = 'POST'
endpoint = 'user/repos'
params = self.reqp.get(method, endpoint)
params['data'] = json.dumps({"name": name, "description": description, "homepage": homepage})
print(params)
r = requests.post(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'], data=params['data'])
return self.response.Get(r, res_type='json')
def gets(self, visibility=None, affiliation=None, type=None, sort='full_name', direction=None, per_page=30):
if (visibility is None) and (affiliation is None) and (type is None):
type = 'all'
self.__raise_param_error(visibility, ['all', 'public', 'private'], 'visibility')
if not(None is affiliation):
for a in affiliation.split(','):
self.__raise_param_error(a, ['owner', 'collaborator', 'organization_member'], 'affiliation')
self.__raise_param_error(type, ['all', 'owner', 'public', 'private', 'member'], 'type')
self.__raise_param_error(sort, ['created', 'updated', 'pushed', 'full_name'], 'sort')
if direction is None:
if sort == 'full_name':
direction = 'asc'
else:
direction = 'desc'
else:
self.__raise_param_error(direction, ['asc', 'desc'], 'direction')
method = 'GET'
endpoint = 'user/repos'
params = self.reqp.get(method, endpoint)
params['headers']['Accept'] = 'application/vnd.github.drax-preview+json'
params['params'] = {}
if not(None is visibility):
params['params']["visibility"] = visibility
if not(None is affiliation):
params['params']["affiliation"] = affiliation
if not(None is type):
params['params']["type"] = type
if not(None is sort):
params['params']["sort"] = sort
if not(None is direction):
params['params']["direction"] = direction
if not(None is per_page):
params['params']["per_page"] = per_page
print(params)
repos = []
url = urllib.parse.urljoin("https://api.github.com", endpoint)
while (None is not url):
print(url)
params = self.reqp.update_otp(params)
print(params)
r = requests.get(url, headers=params['headers'], params=params['params'])
repos += self.response.Get(r, res_type='json')
url = self.response.GetLinkNext(r)
return repos
def __raise_param_error(self, target, check_list, target_name):
if not(target is None) and not(target in check_list):
raise Exception("Parameter Error: [{0}] should be one of the following values. : {1}".format(target_name, check_list))
"""
    Get the list of public repositories.
@param [int] since is repository id on github.
"""
def list_public_repos(self, since, per_page=30):
method = 'GET'
endpoint = 'repositories'
params = self.reqp.get(method, endpoint)
params['params'] = json.dumps({"since": since, "per_page": per_page})
print(params)
r = requests.get(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'])
return self.response.Get(r, res_type='json')
"""
    Delete a repository.
    If no arguments are given, the default user's repository named after the current directory is targeted.
"""
def delete(self, username=None, repo_name=None):
if None is username:
username = self.data.get_username()
if None is repo_name:
repo_name = self.data.get_repo_name()
endpoint = 'repos/:owner/:repo'
params = self.reqp.get('DELETE', endpoint)
endpoint = endpoint.replace(':owner', username)
endpoint = endpoint.replace(':repo', repo_name)
r = requests.delete(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'])
return self.response.Get(r)
"""
    Edit a repository.
    Updates the repository name, description and homepage.
    Arguments left as None are not changed.
"""
def edit(self, name=None, description=None, homepage=None):
if None is name:
name = self.data.get_repo_name()
if None is description:
description = self.data.get_repo_description()
if None is homepage:
homepage = self.data.get_repo_homepage()
endpoint = 'repos/:owner/:repo'
params = self.reqp.get('PATCH', endpoint)
endpoint = endpoint.replace(':owner', self.data.get_username())
endpoint = endpoint.replace(':repo', self.data.get_repo_name())
params['data'] = {}
params['data']['name'] = name
if not(None is description or '' == description):
params['data']['description'] = description
if not(None is homepage or '' == homepage):
params['data']['homepage'] = homepage
r = requests.patch(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'], data=json.dumps(params['data']))
return self.response.Get(r, res_type='json')
"""
    Get the programming languages used in a repository and their file sizes.
    @param {string} username is the user name
    @param {string} repo_name is the target repository name
    @return {dict} result (JSON format)
"""
def list_languages(self, username=None, repo_name=None):
if None is username:
username = self.reqp.get_username()
if None is repo_name:
repo_name = self.data.get_repo_name()
endpoint = 'repos/:owner/:repo/languages'
params = self.reqp.get('GET', endpoint)
endpoint = endpoint.replace(':owner', username)
endpoint = endpoint.replace(':repo', repo_name)
r = requests.get(urllib.parse.urljoin("https://api.github.com", endpoint), headers=params['headers'])
return self.response.Get(r, res_type='json')
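# Hypothetical usage sketch (not part of the original module): the constructor takes
# helper objects for stored settings, request parameters and response handling, e.g.
#   repos = Repositories(data, reqp, response)
#   repos.create("sample-repo", description="created via the GitHub v3 API")
#   print(repos.list_languages("octocat", "Hello-World"))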
| [
"[email protected]"
]
| |
10c388059eabb303f3a11a60b8fac735303683bb | e828fca9d0622710b43222c377adf954df072220 | /shabanipy/quantum_hall/conversion.py | acaf9ac2185f0ef01f596d08336c2ef3d946b958 | [
"MIT"
]
| permissive | jnt299/shabanipy | f42cb4abb648e1ce42501a4d1187a74f2a78011c | 1c2b5b861849ccf76b5ea6aaf0fcbf429aa6bfcf | refs/heads/master | 2022-11-30T17:58:22.295183 | 2020-08-13T19:56:37 | 2020-08-13T19:56:37 | 288,523,531 | 1 | 0 | null | 2020-08-18T17:41:35 | 2020-08-18T17:41:34 | null | UTF-8 | Python | false | false | 3,538 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2019 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Typical Hall bar data conversion routines.
"""
from math import pi, log
import numpy as np
import scipy.constants as cs
GEOMETRIC_FACTORS = {
"Van der Pauw": pi / log(2),
"Standard Hall bar": 0.75,
}
def convert_lock_in_meas_to_diff_res(measured_voltage, bias_current):
"""Convert the voltage measured using a lock-in to differential resistance.
"""
return measured_voltage / bias_current
def kf_from_density(density):
"""Compute the Fermi wavevector from the density.
Parameters
----------
density : float | np.ndarray
Carriers density of the sample expected to be in m^-2
Returns
-------
kf : float | np.ndarray
Fermi wavevector in m^-1.
"""
return np.sqrt(2 * np.pi * density)
def mean_free_time_from_mobility(mobility, effective_mass):
"""Compute the mean free time from the sample mobility
Parameters
----------
mobility : float | np.ndarray
        Carriers mobility of the sample in m^2V^-1s^-1.
effective_mass : float
Effective mass of the carriers in kg.
Returns
-------
mean_free_time : float | np.ndarray
Mean free time in s.
"""
return mobility * effective_mass / cs.e
def fermi_velocity_from_kf(kf, effective_mass):
"""Compute the Fermi velocity from the Fermi wavelength
Parameters
----------
kf : float | np.ndarray
Fermi wavevector in m^-1.
effective_mass : float | np.ndarray
Effective mass in kg.
Returns
-------
fermi_vel : float | np.ndarray
Fermi velocity in m.s^-1.
"""
return cs.hbar * kf / effective_mass
def fermi_velocity_from_density(density, effective_mass):
"""Compute the Fermi velocity directly from the density.
Parameters
----------
    density : float | np.ndarray
Carriers density of the sample expected to be in m^-2
Returns
-------
fermi_vel : float | np.ndarray
Fermi velocity in m.s^-1.
"""
return fermi_velocity_from_kf(kf_from_density(density), effective_mass)
def diffusion_constant_from_mobility_density(mobility, density, effective_mass):
"""Compute the diffusion constant from mobility and density.
Parameters
----------
mobility : float | np.ndarray
        Carriers mobility of the sample in m^2V^-1s^-1.
    density : float | np.ndarray
Carriers density of the sample expected to be in m^-2
Returns
-------
diffusion_constant : float | np.ndarray
Diffusion constant of the carriers m^2s^-1.
"""
vf = fermi_velocity_from_density(density, effective_mass)
mft = mean_free_time_from_mobility(mobility, effective_mass)
return vf ** 2 * mft / 2
def htr_from_mobility_density(mobility, density, effective_mass):
"""[summary]
Parameters
----------
mobilities : [type]
[description]
densities : [type]
[description]
Returns
-------
"""
d = diffusion_constant_from_mobility_density(mobility, density, effective_mass)
mft = mean_free_time_from_mobility(mobility, effective_mass)
return cs.hbar / (4 * cs.e * d * mft)
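# Hypothetical usage sketch (not part of the original module): the density, mobility
# and effective mass below are made-up example values, not measured data.
if __name__ == "__main__":
    density = 1e16                   # carrier density in m^-2 (assumed)
    mobility = 10.0                  # mobility in m^2 V^-1 s^-1 (assumed)
    effective_mass = 0.03 * cs.m_e   # assumed effective mass in kg
    print("kf =", kf_from_density(density))
    print("vf =", fermi_velocity_from_density(density, effective_mass))
    print("D  =", diffusion_constant_from_mobility_density(mobility, density, effective_mass))
    print("Htr =", htr_from_mobility_density(mobility, density, effective_mass))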
| [
"[email protected]"
]
| |
54b6d697974e94e58e1db9e716971b7d5af3e9b6 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/list_issue_comments_v4_request.py | 52fa5da1dc3b94983d614e5dad194057809f34b9 | [
"Apache-2.0"
]
| permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 4,887 | py | # coding: utf-8
import pprint
import re
import six
class ListIssueCommentsV4Request:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'issue_id': 'int',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'project_id': 'project_id',
'issue_id': 'issue_id',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, project_id=None, issue_id=None, offset=None, limit=None):
"""ListIssueCommentsV4Request - a model defined in huaweicloud sdk"""
self._project_id = None
self._issue_id = None
self._offset = None
self._limit = None
self.discriminator = None
self.project_id = project_id
self.issue_id = issue_id
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
@property
def project_id(self):
"""Gets the project_id of this ListIssueCommentsV4Request.
        Project ID
:return: The project_id of this ListIssueCommentsV4Request.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this ListIssueCommentsV4Request.
        Project ID
:param project_id: The project_id of this ListIssueCommentsV4Request.
:type: str
"""
self._project_id = project_id
@property
def issue_id(self):
"""Gets the issue_id of this ListIssueCommentsV4Request.
        Work item (issue) ID
:return: The issue_id of this ListIssueCommentsV4Request.
:rtype: int
"""
return self._issue_id
@issue_id.setter
def issue_id(self, issue_id):
"""Sets the issue_id of this ListIssueCommentsV4Request.
        Work item (issue) ID
:param issue_id: The issue_id of this ListIssueCommentsV4Request.
:type: int
"""
self._issue_id = issue_id
@property
def offset(self):
"""Gets the offset of this ListIssueCommentsV4Request.
        Pagination index (offset)
:return: The offset of this ListIssueCommentsV4Request.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListIssueCommentsV4Request.
        Pagination index (offset)
:param offset: The offset of this ListIssueCommentsV4Request.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListIssueCommentsV4Request.
        Number of records per page; at most 100
:return: The limit of this ListIssueCommentsV4Request.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListIssueCommentsV4Request.
        Number of records per page; at most 100
:param limit: The limit of this ListIssueCommentsV4Request.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListIssueCommentsV4Request):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
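# Hypothetical usage sketch (not part of the generated SDK module): the identifiers
# below are placeholders, not real project or issue ids.
#   request = ListIssueCommentsV4Request(project_id="<project_id>", issue_id=123, offset=0, limit=10)
#   print(request.to_dict())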
| [
"[email protected]"
]
| |
e6af66b8ae690c09cd953e9196226280a0bd603b | a6590941fea4880593d5b1cd23eedfe696f4e446 | /ABC01_99/ABC95/a.py | 980ed436328727063428b248fda3aaaf39cf0aaa | []
| no_license | cod4i3/MyAtcoder | 9fb92f2dd06c5b6217e925a82d8db4f91355a70f | 53bdac3fa7eb4ac48ca6d5c70461639beb6aa81d | refs/heads/master | 2023-02-17T09:15:16.282873 | 2021-01-15T13:34:03 | 2021-01-15T13:34:03 | 232,006,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | s = input()
print(700 + s.count('o') * 100)
| [
"[email protected]"
]
| |
24ab81ff2c511dd5587eebf58083e235fd9bdec7 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/fc/apinninglbl.py | f7b4c92b96d82adacf7230e8ed621d61e9384b9f | []
| no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,523 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class APinningLbl(Mo):
meta = ClassMeta("cobra.model.fc.APinningLbl")
meta.isAbstract = True
meta.moClassName = "fcAPinningLbl"
meta.moClassName = "fcAPinningLbl"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract Fibre Channel Uplink Pinning Label"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.concreteSubClasses.add("cobra.model.fc.PinningLbl")
meta.concreteSubClasses.add("cobra.model.fc.PinningLblDef")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
31212698b833a9003fd16b7a5fc99096aa8e5d13 | b39b0625795b0640a6a68151f2012ce139f423b8 | /iaas/test/test_flavor_profile_api.py | 095a47c5a2b57f517a3c35c6945e5b54508299a9 | []
| no_license | darrylcauldwell/casCodegen | 8e82b1f08e8260482996aec3d8be10934a65dd03 | 1f1ff9ab8a33102bcfcb8be276d51992d96bcb61 | refs/heads/master | 2020-07-27T14:42:28.550855 | 2019-09-17T18:30:28 | 2019-09-17T18:30:28 | 209,127,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | # coding: utf-8
"""
VMware Cloud Assembly IaaS API
A multi-cloud IaaS API for Cloud Automation Services # noqa: E501
OpenAPI spec version: 2019-01-15
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from api.flavor_profile_api import FlavorProfileApi # noqa: E501
from swagger_client.rest import ApiException
class TestFlavorProfileApi(unittest.TestCase):
"""FlavorProfileApi unit test stubs"""
def setUp(self):
        self.api = FlavorProfileApi()  # noqa: E501
def tearDown(self):
pass
def test_create_flavor_profile(self):
"""Test case for create_flavor_profile
Create flavor profile # noqa: E501
"""
pass
def test_delete_flavor_profile(self):
"""Test case for delete_flavor_profile
Delete flavor profile # noqa: E501
"""
pass
def test_get_flavor_profile(self):
"""Test case for get_flavor_profile
Get flavor profile # noqa: E501
"""
pass
def test_get_flavor_profiles(self):
"""Test case for get_flavor_profiles
Get flavor profile # noqa: E501
"""
pass
def test_update_flavor_profile(self):
"""Test case for update_flavor_profile
Update flavor profile # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
1ba90dd656c8980eff31b4972d50accaaff84971 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Quantization/trend_MovingAverage/cycle_30/ar_/test_artificial_1024_Quantization_MovingAverage_30__20.py | 1d8cd7fcd6989efe67729b85e14bd6887518a581 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 273 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"[email protected]"
]
| |
e0d15eea5b6d89432ba750f5c3a61bdb7bd0ce84 | 730103ddecd23142238defe2a2b1ab3c582cdc45 | /onnx2tf/ops/ReverseSequence.py | c2c8dc8337c257539be89abccb5dab2eb3372482 | [
"Apache-2.0",
"MIT"
]
| permissive | PINTO0309/onnx2tf | dcfb0fd8a4810ef1262aa565ba42b5124012bdb2 | b0e7d106cc69c0ea0fd464c4dd9064a5b0d6668b | refs/heads/main | 2023-08-30T23:28:56.386741 | 2023-08-29T01:48:40 | 2023-08-29T01:48:40 | 541,831,874 | 345 | 45 | MIT | 2023-09-14T16:53:12 | 2022-09-27T00:06:32 | Python | UTF-8 | Python | false | false | 3,308 | py | import random
random.seed(0)
import numpy as np
np.random.seed(0)
import tensorflow as tf
import onnx_graphsurgeon as gs
from onnx2tf.utils.common_functions import (
get_constant_or_variable,
print_node_info,
inverted_operation_enable_disable,
make_tf_node_info,
get_replacement_parameter,
pre_process_transpose,
post_process_transpose,
)
@print_node_info
@inverted_operation_enable_disable
@get_replacement_parameter
def make_node(
*,
graph_node: gs.Node,
tf_layers_dict: dict,
**kwargs: dict,
):
"""ReverseSequence
Parameters
----------
graph_node: gs.Node
graph_surgeon Node
tf_layers_dict: dict
optype, shape, dtype, tensorflow graph
"""
before_op_output_shape_trans_1 = \
tf_layers_dict.get(graph_node.inputs[0].name, {}).get('before_op_output_shape_trans', True)
before_op_output_shape_trans = \
before_op_output_shape_trans_1
graph_node_input_1 = get_constant_or_variable(
graph_node.inputs[0],
before_op_output_shape_trans,
)
input_tensor = tf_layers_dict[graph_node_input_1.name]['tf_node'] \
if isinstance(graph_node_input_1, gs.Variable) else graph_node_input_1
graph_node_input_2 = get_constant_or_variable(
graph_node.inputs[1],
before_op_output_shape_trans,
)
sequence_lens = tf_layers_dict[graph_node_input_2.name]['tf_node'] \
if isinstance(graph_node_input_2, gs.Variable) else graph_node_input_2
graph_node_output: gs.Variable = graph_node.outputs[0]
shape = graph_node_output.shape
dtype = graph_node_output.dtype
batch_axis = graph_node.attrs.get('batch_axis', 1)
time_axis = graph_node.attrs.get('time_axis', 0)
# Preserving Graph Structure (Dict)
tf_layers_dict[graph_node_output.name] = {
'optype': graph_node.op,
'shape': shape,
'dtype': dtype,
}
# Pre-process transpose
input_tensor = pre_process_transpose(
value_before_transpose=input_tensor,
param_target='inputs',
param_name=graph_node.inputs[0].name,
**kwargs,
)
# Generation of TF OP
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.reverse_sequence(
input=input_tensor,
seq_lengths=sequence_lens,
seq_axis=time_axis,
batch_axis=batch_axis,
name=graph_node.name,
)
# Post-process transpose
tf_layers_dict[graph_node_output.name]['tf_node'] = post_process_transpose(
value_before_transpose=tf_layers_dict[graph_node_output.name]['tf_node'],
param_target='outputs',
param_name=graph_node.outputs[0].name,
**kwargs,
)
# Generation of Debug Info
tf_layers_dict[graph_node_output.name]['tf_node_info'] = \
make_tf_node_info(
node_info={
'tf_op_type': tf.reverse_sequence,
'tf_inputs': {
'input': input_tensor,
'seq_lengths': sequence_lens,
'seq_axis': time_axis,
'batch_axis': batch_axis,
},
'tf_outputs': {
'output': tf_layers_dict[graph_node_output.name]['tf_node'],
},
}
)
| [
"[email protected]"
]
| |
979b699a367d604f9353cf9805004d4f0d43b7c5 | 966280ab617298a3ced79bc60189b301c795067a | /Sliding-Window/239_sliding_window_maximum.py | 445ece104ef138fc8ad1d83b3627505908fe52ce | []
| no_license | Rishabhh/LeetCode-Solutions | c0382e5ba5b77832322c992418f697f42213620f | 2536744423ee9dc7da30e739eb0bca521c216f00 | refs/heads/master | 2020-06-10T02:37:42.103289 | 2019-05-29T06:38:02 | 2019-05-29T06:38:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | import collections
class Solution:
def max_sliding_window(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
res = []
q = collections.deque()
n = len(nums)
for i in range(n):
while q and q[-1][1] <= nums[i]:
q.pop()
q.append((i, nums[i]))
if i >= k:
while q and q[0][0] <= i - k:
q.popleft()
if i >= k - 1:
res.append(q[0][1])
return res
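# Hypothetical quick check (not part of the original file): the classic LeetCode
# example for problem 239; the expected output is [3, 3, 5, 5, 6, 7].
if __name__ == "__main__":
    print(Solution().max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 3))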
| [
"[email protected]"
]
| |
879ed203a95faf1ad6a9ca1ed7ab98c3695fd4b6 | bd2a975f5f6cd771393f994ebd428e43142ee869 | /new_render_data/input/p/script/abort/kafka/consumer/group.py | 54a3711aeb79c67b8eeaec2a9f8a97e0c5b52feb | []
| no_license | sol87/Pycharm_python36 | 1a297c9432462fc0d3189a1dc7393fdce26cb501 | fa7d53990040d888309a349cfa458a537b8d5f04 | refs/heads/master | 2023-03-16T10:35:55.697402 | 2018-11-08T09:52:14 | 2018-11-08T09:52:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,351 | py | from __future__ import absolute_import
import copy
import logging
import socket
import sys
import time
from kafka.errors import KafkaConfigurationError, UnsupportedVersionError
from kafka.vendor import six
from kafka.client_async import KafkaClient, selectors
from kafka.consumer.fetcher import Fetcher
from kafka.consumer.subscription_state import SubscriptionState
from kafka.coordinator.consumer import ConsumerCoordinator
from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.offset import OffsetResetStrategy
from kafka.structs import TopicPartition
from kafka.version import __version__
log = logging.getLogger(__name__)
class KafkaConsumer(six.Iterator):
"""Consume records from a Kafka cluster.
The consumer will transparently handle the failure of servers in the Kafka
cluster, and adapt as topic-partitions are created or migrate between
brokers. It also interacts with the assigned kafka Group Coordinator node
to allow multiple consumers to load balance consumption of topics (requires
kafka >= 0.9.0.0).
The consumer is not thread safe and should not be shared across threads.
Arguments:
*topics (str): optional list of topics to subscribe to. If not set,
call :meth:`~kafka.KafkaConsumer.subscribe` or
:meth:`~kafka.KafkaConsumer.assign` before consuming records.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): A name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
group_id (str or None): The name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: None
key_deserializer (callable): Any callable that takes a
raw message key and returns a deserialized key.
value_deserializer (callable): Any callable that takes a
raw message value and returns a deserialized value.
fetch_min_bytes (int): Minimum amount of data the server should
return for a fetch request, otherwise wait up to
fetch_max_wait_ms for more data to accumulate. Default: 1.
fetch_max_wait_ms (int): The maximum amount of time in milliseconds
the server will block before answering the fetch request if
there isn't sufficient data to immediately satisfy the
requirement given by fetch_min_bytes. Default: 500.
fetch_max_bytes (int): The maximum amount of data the server should
return for a fetch request. This is not an absolute maximum, if the
first message in the first non-empty partition of the fetch is
larger than this value, the message will still be returned to
ensure that the consumer can make progress. NOTE: consumer performs
fetches to multiple brokers in parallel so memory usage will depend
on the number of brokers containing partitions for the topic.
Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 Mb).
max_partition_fetch_bytes (int): The maximum amount of data
per-partition the server will return. The maximum total memory
used for a request = #partitions * max_partition_fetch_bytes.
This size must be at least as large as the maximum message size
the server allows or else it is possible for the producer to
send messages larger than the consumer can fetch. If that
happens, the consumer can get stuck trying to fetch a large
message on a certain partition. Default: 1048576.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 40000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. To avoid connection storms, a
randomization factor of 0.2 will be applied to the backoff
resulting in a random range between 20% below and 20% above
the computed value. Default: 1000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
other value will raise the exception. Default: 'latest'.
enable_auto_commit (bool): If True , the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): Number of milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
default_offset_commit_callback (callable): Called as
callback(offsets, response) response will be either an Exception
or an OffsetCommitResponse struct. This callback can be used to
trigger custom actions when a commit request completes.
check_crcs (bool): Automatically check the CRC32 of the records
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata, even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
partition_assignment_strategy (list): List of objects to use to
distribute partition ownership amongst consumer instances when
group management is used.
Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management feature. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group management facilities. Default: 30000
max_poll_records (int): The maximum number of records returned in a
single call to :meth:`~kafka.KafkaConsumer.poll`. Default: 500
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). The java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). The java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
consumer_timeout_ms (int): number of milliseconds to block during
message iteration before raising StopIteration (i.e., ending the
iterator). Default block forever [float('inf')].
skip_double_compressed_messages (bool): A bug in KafkaProducer <= 1.2.4
caused some messages to be corrupted via double-compression.
By default, the fetcher will return these messages as a compressed
blob of bytes with a single offset, i.e. how the message was
actually published to the cluster. If you prefer to have the
fetcher automatically detect corrupt messages and skip them,
set this option to True. Default: False.
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): Flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
Default: True.
ssl_cafile (str): Optional filename of ca file to use in certificate
verification. Default: None.
ssl_certfile (str): Optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. Default: None.
ssl_keyfile (str): Optional filename containing the client private key.
Default: None.
ssl_password (str): Optional password to be used when loading the
certificate chain. Default: None.
ssl_crlfile (str): Optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
Default: None.
api_version (tuple): Specify which Kafka API version to use. If set to
None, the client will attempt to infer the broker version by probing
various APIs. Different versions enable different functionality.
Examples:
(0, 9) enables full group coordination features with automatic
partition assignment and rebalancing,
(0, 8, 2) enables kafka-storage offset commits with manual
partition assignment only,
(0, 8, 1) enables zookeeper-storage offset commits with manual
partition assignment only,
(0, 8, 0) enables basic functionality but requires manual
partition assignment and offset management.
For the full list of supported versions, see
KafkaClient.API_VERSIONS. Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version set to 'auto'
metric_reporters (list): A list of classes to use as metrics reporters.
Implementing the AbstractMetricsReporter interface allows plugging
in classes that will be notified of new metric creation. Default: []
metrics_num_samples (int): The number of samples maintained to compute
metrics. Default: 2
metrics_sample_window_ms (int): The maximum age in milliseconds of
samples used to compute metrics. Default: 30000
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to True
the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+ Default: True
sasl_mechanism (str): String picking sasl mechanism when security_protocol
is SASL_PLAINTEXT or SASL_SSL. Currently only PLAIN is supported.
Default: None
sasl_plain_username (str): Username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): Password for sasl PLAIN authentication.
Default: None
Note:
Configuration parameters are described in more detail at
https://kafka.apache.org/documentation/#newconsumerconfigs
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'group_id': None,
'key_deserializer': None,
'value_deserializer': None,
'fetch_max_wait_ms': 500,
'fetch_min_bytes': 1,
'fetch_max_bytes': 52428800,
'max_partition_fetch_bytes': 1 * 1024 * 1024,
'request_timeout_ms': 40 * 1000,
'retry_backoff_ms': 100,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'auto_offset_reset': 'latest',
'enable_auto_commit': True,
'auto_commit_interval_ms': 5000,
'default_offset_commit_callback': lambda offsets, response: True,
'check_crcs': True,
'metadata_max_age_ms': 5 * 60 * 1000,
'partition_assignment_strategy': (RangePartitionAssignor, RoundRobinPartitionAssignor),
'heartbeat_interval_ms': 3000,
'session_timeout_ms': 30000,
'max_poll_records': 500,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'consumer_timeout_ms': float('inf'),
'skip_double_compressed_messages': False,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'connections_max_idle_ms': 9 * 60 * 1000,
'metric_reporters': [],
'metrics_num_samples': 2,
'metrics_sample_window_ms': 30000,
'metric_group_prefix': 'consumer',
'selector': selectors.DefaultSelector,
'exclude_internal_topics': True,
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
}
def __init__(self, *topics, **configs):
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs.pop(key)
# Only check for extra config keys in top-level class
assert not configs, 'Unrecognized configs: %s' % configs
deprecated = {'smallest': 'earliest', 'largest': 'latest'}
if self.config['auto_offset_reset'] in deprecated:
new_config = deprecated[self.config['auto_offset_reset']]
log.warning('use auto_offset_reset=%s (%s is deprecated)',
new_config, self.config['auto_offset_reset'])
self.config['auto_offset_reset'] = new_config
request_timeout_ms = self.config['request_timeout_ms']
session_timeout_ms = self.config['session_timeout_ms']
fetch_max_wait_ms = self.config['fetch_max_wait_ms']
if request_timeout_ms <= session_timeout_ms:
raise KafkaConfigurationError(
"Request timeout (%s) must be larger than session timeout (%s)" %
(request_timeout_ms, session_timeout_ms))
if request_timeout_ms <= fetch_max_wait_ms:
raise KafkaConfigurationError("Request timeout (%s) must be larger than fetch-max-wait-ms (%s)" %
(request_timeout_ms, fetch_max_wait_ms))
metrics_tags = {'client-id': self.config['client_id']}
metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
time_window_ms=self.config['metrics_sample_window_ms'],
tags=metrics_tags)
reporters = [reporter() for reporter in self.config['metric_reporters']]
self._metrics = Metrics(metric_config, reporters)
# TODO _metrics likely needs to be passed to KafkaClient, etc.
# api_version was previously a str. Accept old format for now
if isinstance(self.config['api_version'], str):
str_version = self.config['api_version']
if str_version == 'auto':
self.config['api_version'] = None
else:
self.config['api_version'] = tuple(map(int, str_version.split('.')))
log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
str(self.config['api_version']), str_version)
self._client = KafkaClient(metrics=self._metrics, **self.config)
# Get auto-discovered version from client if necessary
if self.config['api_version'] is None:
self.config['api_version'] = self._client.config['api_version']
self._subscription = SubscriptionState(self.config['auto_offset_reset'])
self._fetcher = Fetcher(
self._client, self._subscription, self._metrics, **self.config)
self._coordinator = ConsumerCoordinator(
self._client, self._subscription, self._metrics,
assignors=self.config['partition_assignment_strategy'],
**self.config)
self._closed = False
self._iterator = None
self._consumer_timeout = float('inf')
if topics:
self._subscription.subscribe(topics=topics)
self._client.set_topics(topics)
def assign(self, partitions):
"""Manually assign a list of TopicPartitions to this consumer.
Arguments:
partitions (list of TopicPartition): Assignment for this instance.
Raises:
IllegalStateError: If consumer has already called
:meth:`~kafka.KafkaConsumer.subscribe`.
Warning:
It is not possible to use both manual partition assignment with
:meth:`~kafka.KafkaConsumer.assign` and group assignment with
:meth:`~kafka.KafkaConsumer.subscribe`.
Note:
This interface does not support incremental assignment and will
replace the previous assignment (if there was one).
Note:
Manual topic assignment through this method does not use the
consumer's group management functionality. As such, there will be
no rebalance operation triggered when group membership or cluster
and topic metadata change.
"""
self._subscription.assign_from_user(partitions)
self._client.set_topics([tp.topic for tp in partitions])
def assignment(self):
"""Get the TopicPartitions currently assigned to this consumer.
If partitions were directly assigned using
:meth:`~kafka.KafkaConsumer.assign`, then this will simply return the
same partitions that were previously assigned. If topics were
subscribed using :meth:`~kafka.KafkaConsumer.subscribe`, then this will
give the set of topic partitions currently assigned to the consumer
(which may be None if the assignment hasn't happened yet, or if the
partitions are in the process of being reassigned).
Returns:
set: {TopicPartition, ...}
"""
return self._subscription.assigned_partitions()
def close(self, autocommit=True):
"""Close the consumer, waiting indefinitely for any needed cleanup.
Keyword Arguments:
autocommit (bool): If auto-commit is configured for this consumer,
this optional flag causes the consumer to attempt to commit any
pending consumed offsets prior to close. Default: True
"""
if self._closed:
return
log.debug("Closing the KafkaConsumer.")
self._closed = True
self._coordinator.close(autocommit=autocommit)
self._metrics.close()
self._client.close()
try:
self.config['key_deserializer'].close()
except AttributeError:
pass
try:
self.config['value_deserializer'].close()
except AttributeError:
pass
log.debug("The KafkaConsumer has closed.")
def commit_async(self, offsets=None, callback=None):
"""Commit offsets to kafka asynchronously, optionally firing callback.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
callback (callable, optional): Called as callback(offsets, response)
with response as either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
log.debug("Committing offsets: %s", offsets)
future = self._coordinator.commit_offsets_async(
offsets, callback=callback)
return future
def commit(self, offsets=None):
"""Commit offsets to kafka, blocking until success or error.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
Blocks until either the commit succeeds or an unrecoverable error is
encountered (in which case it is thrown to the caller).
Currently only supports kafka-topic offset storage (not zookeeper).
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
self._coordinator.commit_offsets_sync(offsets)
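    # Illustrative manual-commit workflow (a sketch only; assumes enable_auto_commit=False,
    # a configured group_id, and a hypothetical process_batch callback):
    #
    #   records = consumer.poll(timeout_ms=1000)
    #   for tp, messages in records.items():
    #       process_batch(tp, messages)
    #   consumer.commit()   # blocks until the commit succeeds or raises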
def committed(self, partition):
"""Get the last committed offset for the given partition.
This offset will be used as the position for the consumer
in the event of a failure.
This call may block to do a remote call if the partition in question
isn't assigned to this consumer or if the consumer hasn't yet
initialized its cache of committed offsets.
Arguments:
partition (TopicPartition): The partition to check.
Returns:
The last committed offset, or None if there was no prior commit.
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
if self._subscription.is_assigned(partition):
committed = self._subscription.assignment[partition].committed
if committed is None:
self._coordinator.refresh_committed_offsets_if_needed()
committed = self._subscription.assignment[partition].committed
else:
commit_map = self._coordinator.fetch_committed_offsets([partition])
if partition in commit_map:
committed = commit_map[partition].offset
else:
committed = None
return committed
def topics(self):
"""Get all topics the user is authorized to view.
Returns:
set: topics
"""
cluster = self._client.cluster
if self._client._metadata_refresh_in_progress and self._client._topics:
future = cluster.request_update()
self._client.poll(future=future)
stash = cluster.need_all_topic_metadata
cluster.need_all_topic_metadata = True
future = cluster.request_update()
self._client.poll(future=future)
cluster.need_all_topic_metadata = stash
return cluster.topics()
def partitions_for_topic(self, topic):
"""Get metadata about the partitions for a given topic.
Arguments:
topic (str): Topic to check.
Returns:
set: Partition ids
"""
return self._client.cluster.partitions_for_topic(topic)
def poll(self, timeout_ms=0, max_records=None):
"""Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
"""
assert timeout_ms >= 0, 'Timeout must not be negative'
if max_records is None:
max_records = self.config['max_poll_records']
# Poll for new data until the timeout expires
start = time.time()
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {}
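    # Illustrative polling loop (a sketch, not from the upstream docs): poll() returns a
    # dict mapping partitions to lists of records, so an empty dict simply means nothing
    # arrived within timeout_ms.
    #
    #   while True:
    #       batch = consumer.poll(timeout_ms=500, max_records=100)
    #       for tp, messages in batch.items():
    #           for message in messages:
    #               print(tp.topic, tp.partition, message.offset)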
def _poll_once(self, timeout_ms, max_records):
"""Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.
Arguments:
timeout_ms (int): The maximum time in milliseconds to block.
Returns:
dict: Map of topic to list of records (may be empty).
"""
if self._use_consumer_group():
self._coordinator.ensure_coordinator_known()
self._coordinator.ensure_active_group()
# 0.8.2 brokers support kafka-backed offset storage via group coordinator
elif self.config['group_id'] is not None and self.config['api_version'] >= (0, 8, 2):
self._coordinator.ensure_coordinator_known()
# Fetch positions if we have partitions we're subscribed to that we
# don't know the offset for
if not self._subscription.has_all_fetch_positions():
self._update_fetch_positions(self._subscription.missing_fetch_positions())
# If data is available already, e.g. from a previous network client
# poll() call to commit, then just return it immediately
records, partial = self._fetcher.fetched_records(max_records)
if records:
# Before returning the fetched records, we can send off the
# next round of fetches and avoid block waiting for their
# responses to enable pipelining while the user is handling the
# fetched records.
if not partial:
self._fetcher.send_fetches()
return records
# Send any new fetches (won't resend pending fetches)
self._fetcher.send_fetches()
self._client.poll(timeout_ms=timeout_ms, sleep=True)
records, _ = self._fetcher.fetched_records(max_records)
return records
def position(self, partition):
"""Get the offset of the next record that will be fetched
Arguments:
partition (TopicPartition): Partition to check
Returns:
int: Offset
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
offset = self._subscription.assignment[partition].position
if offset is None:
self._update_fetch_positions([partition])
offset = self._subscription.assignment[partition].position
return offset
def highwater(self, partition):
"""Last known highwater offset for a partition.
A highwater offset is the offset that will be assigned to the next
message that is produced. It may be useful for calculating lag, by
comparing with the reported position. Note that both position and
highwater refer to the *next* offset -- i.e., highwater offset is
one greater than the newest available message.
Highwater offsets are returned in FetchResponse messages, so will
not be available if no FetchRequests have been sent for this partition
yet.
Arguments:
partition (TopicPartition): Partition to check
Returns:
int or None: Offset if available
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert self._subscription.is_assigned(partition), 'Partition is not assigned'
return self._subscription.assignment[partition].highwater
def pause(self, *partitions):
"""Suspend fetching from the requested partitions.
Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.
Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.
Arguments:
*partitions (TopicPartition): Partitions to pause.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Pausing partition %s", partition)
self._subscription.pause(partition)
def paused(self):
"""Get the partitions that were previously paused using
:meth:`~kafka.KafkaConsumer.pause`.
Returns:
set: {partition (TopicPartition), ...}
"""
return self._subscription.paused_partitions()
def resume(self, *partitions):
"""Resume fetching from the specified (paused) partitions.
Arguments:
*partitions (TopicPartition): Partitions to resume.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
for partition in partitions:
log.debug("Resuming partition %s", partition)
self._subscription.resume(partition)
def seek(self, partition, offset):
"""Manually specify the fetch offset for a TopicPartition.
Overrides the fetch offsets that the consumer will use on the next
:meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same
partition more than once, the latest offset will be used on the next
:meth:`~kafka.KafkaConsumer.poll`.
Note: You may lose data if this API is arbitrarily used in the middle of
consumption to reset the fetch offsets.
Arguments:
partition (TopicPartition): Partition for seek operation
offset (int): Message offset in partition
Raises:
AssertionError: If offset is not an int >= 0; or if partition is not
currently assigned.
"""
if not isinstance(partition, TopicPartition):
raise TypeError('partition must be a TopicPartition namedtuple')
assert isinstance(offset, int) and offset >= 0, 'Offset must be >= 0'
assert partition in self._subscription.assigned_partitions(), 'Unassigned partition'
log.debug("Seeking to offset %s for partition %s", offset, partition)
self._subscription.assignment[partition].seek(offset)
def seek_to_beginning(self, *partitions):
"""Seek to the oldest available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to beginning of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
def seek_to_end(self, *partitions):
"""Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
"""
if not all([isinstance(p, TopicPartition) for p in partitions]):
raise TypeError('partitions must be TopicPartition namedtuples')
if not partitions:
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
for tp in partitions:
log.debug("Seeking to end of partition %s", tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
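    # Illustrative use of the seek APIs (a sketch; topic name and offset are made up):
    #
    #   tp = TopicPartition('my-topic', 0)
    #   consumer.assign([tp])
    #   consumer.seek_to_beginning(tp)   # replay from the oldest available offset
    #   consumer.seek(tp, 42)            # or jump to an arbitrary offset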
def subscribe(self, topics=(), pattern=None, listener=None):
"""Subscribe to a list of topics, or a topic regex pattern.
Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).
This method is incompatible with :meth:`~kafka.KafkaConsumer.assign`.
Arguments:
topics (list): List of topics for subscription.
pattern (str): Pattern to match available topics. You must provide
either topics or pattern, but not both.
listener (ConsumerRebalanceListener): Optionally include listener
callback, which will be called before and after each rebalance
operation.
As part of group management, the consumer will keep track of the
list of consumers that belong to a particular group and will
trigger a rebalance operation if one of the following events
trigger:
* Number of partitions change for any of the subscribed topics
* Topic is created or deleted
* An existing member of the consumer group dies
* A new member is added to the consumer group
When any of these events are triggered, the provided listener
will be invoked first to indicate that the consumer's assignment
has been revoked, and then again when the new assignment has
been received. Note that this listener will immediately override
any listener set in a previous call to subscribe. It is
guaranteed, however, that the partitions revoked/assigned
through this interface are from topics subscribed in this call.
Raises:
IllegalStateError: If called after previously calling
:meth:`~kafka.KafkaConsumer.assign`.
AssertionError: If neither topics or pattern is provided.
TypeError: If listener is not a ConsumerRebalanceListener.
"""
# SubscriptionState handles error checking
self._subscription.subscribe(topics=topics,
pattern=pattern,
listener=listener)
# Regex will need all topic metadata
if pattern is not None:
self._client.cluster.need_all_topic_metadata = True
self._client.set_topics([])
self._client.cluster.request_update()
log.debug("Subscribed to topic pattern: %s", pattern)
else:
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics(self._subscription.group_subscription())
log.debug("Subscribed to topic(s): %s", topics)
def subscription(self):
"""Get the current topic subscription.
Returns:
set: {topic, ...}
"""
return self._subscription.subscription.copy()
def unsubscribe(self):
"""Unsubscribe from all topics and clear all assigned partitions."""
self._subscription.unsubscribe()
self._coordinator.close()
self._client.cluster.need_all_topic_metadata = False
self._client.set_topics([])
log.debug("Unsubscribed all topics or patterns and assigned partitions")
def metrics(self, raw=False):
"""Warning: this is an unstable interface.
It may change in future releases without warning"""
if raw:
return self._metrics.metrics
metrics = {}
for k, v in self._metrics.metrics.items():
if k.group not in metrics:
metrics[k.group] = {}
if k.name not in metrics[k.group]:
metrics[k.group][k.name] = {}
metrics[k.group][k.name] = v.value()
return metrics
def offsets_for_times(self, timestamps):
"""Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.
This is a blocking call. The consumer does not have to be assigned the
partitions.
If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
timestamps (dict): ``{TopicPartition: int}`` mapping from partition
to the timestamp to look up. Unit should be milliseconds since
beginning of the epoch (midnight Jan 1, 1970 (UTC))
Returns:
``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
to the timestamp and offset of the first message with timestamp
greater than or equal to the target timestamp.
Raises:
ValueError: If the target timestamp is negative
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
for tp, ts in timestamps.items():
timestamps[tp] = int(ts)
if ts < 0:
raise ValueError(
"The target time for partition {} is {}. The target time "
"cannot be negative.".format(tp, ts))
return self._fetcher.get_offsets_by_times(
timestamps, self.config['request_timeout_ms'])
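    # Illustrative timestamp lookup (a sketch; assumes brokers newer than 0.10.0 and an
    # assigned TopicPartition tp); the returned OffsetAndTimestamp carries the matched offset.
    #
    #   one_hour_ago = int(time.time() * 1000) - 3600 * 1000
    #   offsets = consumer.offsets_for_times({tp: one_hour_ago})
    #   if offsets[tp] is not None:
    #       consumer.seek(tp, offsets[tp].offset)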
def beginning_offsets(self, partitions):
"""Get the first offset for the given partitions.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The earliest available offsets for the
given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms.
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
offsets = self._fetcher.beginning_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def end_offsets(self, partitions):
"""Get the last offset for the given partitions. The last offset of a
partition is the offset of the upcoming message, i.e. the offset of the
last available message + 1.
This method does not change the current consumer position of the
partitions.
Note:
This method may block indefinitely if the partition does not exist.
Arguments:
partitions (list): List of TopicPartition instances to fetch
offsets for.
Returns:
``{TopicPartition: int}``: The end offsets for the given partitions.
Raises:
UnsupportedVersionError: If the broker does not support looking
up the offsets by timestamp.
KafkaTimeoutError: If fetch failed in request_timeout_ms
"""
if self.config['api_version'] <= (0, 10, 0):
raise UnsupportedVersionError(
"offsets_for_times API not supported for cluster version {}"
.format(self.config['api_version']))
offsets = self._fetcher.end_offsets(
partitions, self.config['request_timeout_ms'])
return offsets
def _use_consumer_group(self):
"""Return True iff this consumer can/should join a broker-coordinated group."""
if self.config['api_version'] < (0, 9):
return False
elif self.config['group_id'] is None:
return False
elif not self._subscription.partitions_auto_assigned():
return False
return True
def _update_fetch_positions(self, partitions):
"""Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
Arguments:
partitions (List[TopicPartition]): The partitions that need
updating fetch positions.
Raises:
NoOffsetForPartitionError: If no offset is stored for a given
partition and no offset reset policy is defined.
"""
if (self.config['api_version'] >= (0, 8, 1) and
self.config['group_id'] is not None):
# Refresh commits for all assigned partitions
self._coordinator.refresh_committed_offsets_if_needed()
# Then, do any offset lookups in case some positions are not known
self._fetcher.update_fetch_positions(partitions)
def _message_generator(self):
assert self.assignment() or self.subscription() is not None, 'No topic subscription or manual partition assignment'
while time.time() < self._consumer_timeout:
if self._use_consumer_group():
self._coordinator.ensure_coordinator_known()
self._coordinator.ensure_active_group()
# 0.8.2 brokers support kafka-backed offset storage via group coordinator
elif self.config['group_id'] is not None and self.config['api_version'] >= (0, 8, 2):
self._coordinator.ensure_coordinator_known()
# Fetch offsets for any subscribed partitions that we arent tracking yet
if not self._subscription.has_all_fetch_positions():
partitions = self._subscription.missing_fetch_positions()
self._update_fetch_positions(partitions)
poll_ms = 1000 * (self._consumer_timeout - time.time())
if not self._fetcher.in_flight_fetches():
poll_ms = 0
self._client.poll(timeout_ms=poll_ms, sleep=True)
# We need to make sure we at least keep up with scheduled tasks,
# like heartbeats, auto-commits, and metadata refreshes
timeout_at = self._next_timeout()
# Because the consumer client poll does not sleep unless blocking on
# network IO, we need to explicitly sleep when we know we are idle
# because we haven't been assigned any partitions to fetch / consume
if self._use_consumer_group() and not self.assignment():
sleep_time = max(timeout_at - time.time(), 0)
if sleep_time > 0 and not self._client.in_flight_request_count():
log.debug('No partitions assigned; sleeping for %s', sleep_time)
time.sleep(sleep_time)
continue
# Short-circuit the fetch iterator if we are already timed out
# to avoid any unintentional interaction with fetcher setup
if time.time() > timeout_at:
continue
for msg in self._fetcher:
yield msg
if time.time() > timeout_at:
log.debug("internal iterator timeout - breaking for poll")
break
# An else block on a for loop only executes if there was no break
# so this should only be called on a StopIteration from the fetcher
# We assume that it is safe to init_fetches when fetcher is done
# i.e., there are no more records stored internally
else:
self._fetcher.send_fetches()
def _next_timeout(self):
timeout = min(self._consumer_timeout,
self._client._delayed_tasks.next_at() + time.time(),
self._client.cluster.ttl() / 1000.0 + time.time())
# Although the delayed_tasks timeout above should cover processing
# HeartbeatRequests, it is still possible that HeartbeatResponses
# are left unprocessed during a long _fetcher iteration without
# an intermediate poll(). And because tasks are responsible for
# rescheduling themselves, an unprocessed response will prevent
# the next heartbeat from being sent. This check should help
# avoid that.
if self._use_consumer_group():
heartbeat = time.time() + self._coordinator.heartbeat.ttl()
timeout = min(timeout, heartbeat)
return timeout
def __iter__(self): # pylint: disable=non-iterator-returned
return self
def __next__(self):
if not self._iterator:
self._iterator = self._message_generator()
self._set_consumer_timeout()
try:
return next(self._iterator)
except StopIteration:
self._iterator = None
raise
def _set_consumer_timeout(self):
# consumer_timeout_ms can be used to stop iteration early
if self.config['consumer_timeout_ms'] >= 0:
self._consumer_timeout = time.time() + (
self.config['consumer_timeout_ms'] / 1000.0)
# Old KafkaConsumer methods are deprecated
def configure(self, **configs):
raise NotImplementedError(
'deprecated -- initialize a new consumer')
def set_topic_partitions(self, *topics):
raise NotImplementedError(
'deprecated -- use subscribe() or assign()')
def fetch_messages(self):
raise NotImplementedError(
'deprecated -- use poll() or iterator interface')
def get_partition_offsets(self, topic, partition,
request_time_ms, max_num_offsets):
raise NotImplementedError(
'deprecated -- send an OffsetRequest with KafkaClient')
def offsets(self, group=None):
raise NotImplementedError('deprecated -- use committed(partition)')
def task_done(self, message):
raise NotImplementedError(
'deprecated -- commit offsets manually if needed')
| [
"[email protected]"
]
| |
c16526cc565c48f7f41dbc963e284d4f5ce44160 | 3e1fcf34eae508a3f3d4668edfb334069a88db3d | /court_scraper/configs.py | 3c97d17d3c3bde34e18c1f667fb59a09be10a102 | [
"ISC"
]
| permissive | mscarey/court-scraper | 26d32cb7354b05bb5d5d27a55bf4042e5dde1a4d | e29135331526a11aa5eb0445a9223fc3f7630895 | refs/heads/main | 2023-07-14T20:23:33.488766 | 2020-08-31T14:02:19 | 2020-08-31T14:02:19 | 384,977,976 | 0 | 0 | ISC | 2021-07-11T15:04:57 | 2021-07-11T15:04:57 | null | UTF-8 | Python | false | false | 539 | py | import os
from pathlib import Path
class Configs:
def __init__(self):
try:
self.cache_dir = os.environ['COURT_SCRAPER_DIR']
except KeyError:
self.cache_dir = str(
Path(os.path.expanduser('~'))\
.joinpath('.court-scraper')
)
self.config_file_path = str(
Path(self.cache_dir)\
.joinpath('config.yaml')
)
self.db_path = str(
Path(self.cache_dir)\
.joinpath('cases.db')
)
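# Illustrative usage (a sketch, not part of the package): the resulting paths depend on
# whether the COURT_SCRAPER_DIR environment variable is set.
#
#   configs = Configs()
#   print(configs.cache_dir, configs.config_file_path, configs.db_path)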
| [
"[email protected]"
]
| |
4686304e5272d38d5559b24f2410068350599bea | 9130bdbd90b7a70ac4ae491ddd0d6564c1c733e0 | /venv/lib/python3.8/site-packages/pylsp/python_lsp.py | 78c538c324a0083a40dd1865f74748fc992446cd | []
| no_license | baruwaa12/Projects | 6ca92561fb440c63eb48c9d1114b3fc8fa43f593 | 0d9a7b833f24729095308332b28c1cde63e9414d | refs/heads/main | 2022-10-21T14:13:47.551218 | 2022-10-09T11:03:49 | 2022-10-09T11:03:49 | 160,078,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/aa/42/2f/669662002785de21312365d0e6f7beb46001f06bc2fa9fd2af8f9da7e6 | [
"[email protected]"
]
| |
5f2aa8ca47d120e199ee3e9b6210bbc7d474e2f3 | 4a0348ccb890c73ebd88feafafc279af26e05f25 | /django/django_intro/first2/manage.py | 9fdc1b41afe7cc2a027d9ccbcbc59b711bd4fda3 | []
| no_license | wadeeeawwad/python_stack | 00936837103b9f78f66961d88ae3a6233adbbea3 | 6d2c3712c40b035e0d43cc7a27b2e2f48d4a8281 | refs/heads/master | 2023-07-11T14:59:02.617899 | 2021-08-23T11:37:15 | 2021-08-23T11:37:15 | 364,533,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a94d4f6646875930d94d09068b21013e8e11c0b4 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/app_info_host_vm_list_in_csv_format.py | c68bd8aec7c8d133e43bc961f5b83387b9a11720 | []
| no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,356 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.app_info_host_vm_csv_record import AppInfoHostVmCsvRecord # noqa: F401,E501
from swagger_client.models.csv_list_result import CsvListResult # noqa: F401,E501
class AppInfoHostVmListInCsvFormat(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'file_name': 'str',
'results': 'list[AppInfoHostVmCsvRecord]'
}
attribute_map = {
'file_name': 'file_name',
'results': 'results'
}
def __init__(self, file_name=None, results=None): # noqa: E501
"""AppInfoHostVmListInCsvFormat - a model defined in Swagger""" # noqa: E501
self._file_name = None
self._results = None
self.discriminator = None
if file_name is not None:
self.file_name = file_name
if results is not None:
self.results = results
@property
def file_name(self):
"""Gets the file_name of this AppInfoHostVmListInCsvFormat. # noqa: E501
File name set by HTTP server if API returns CSV result as a file. # noqa: E501
:return: The file_name of this AppInfoHostVmListInCsvFormat. # noqa: E501
:rtype: str
"""
return self._file_name
@file_name.setter
def file_name(self, file_name):
"""Sets the file_name of this AppInfoHostVmListInCsvFormat.
File name set by HTTP server if API returns CSV result as a file. # noqa: E501
:param file_name: The file_name of this AppInfoHostVmListInCsvFormat. # noqa: E501
:type: str
"""
self._file_name = file_name
@property
def results(self):
"""Gets the results of this AppInfoHostVmListInCsvFormat. # noqa: E501
List of appplications discovered during an application discovery session # noqa: E501
:return: The results of this AppInfoHostVmListInCsvFormat. # noqa: E501
:rtype: list[AppInfoHostVmCsvRecord]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this AppInfoHostVmListInCsvFormat.
List of appplications discovered during an application discovery session # noqa: E501
:param results: The results of this AppInfoHostVmListInCsvFormat. # noqa: E501
:type: list[AppInfoHostVmCsvRecord]
"""
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppInfoHostVmListInCsvFormat):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
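# Illustrative use of the generated model (a sketch; field values are made up):
#
#   payload = AppInfoHostVmListInCsvFormat(file_name='apps.csv', results=[])
#   print(payload.to_dict())   # {'file_name': 'apps.csv', 'results': []}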
| [
"[email protected]"
]
| |
6f1ce69f66b79c11989426517bab38e317a3e9f1 | 0b63f38c7fb468e478e5be82c685de1b7ddb87e5 | /meiduo/meiduo_mall/meiduo_mall/apps/goods/serializers.py | 5f87ef206f4094af198abe31f08914950ba75438 | [
"MIT"
]
| permissive | Highsir/Simplestore | fcf5ef81a754604c0953a3c1433a7bc09290c121 | 5fc4d9930b0cd1e115f8c6ebf51cd9e28922d263 | refs/heads/master | 2020-09-01T07:55:45.362457 | 2019-11-01T04:55:48 | 2019-11-01T04:55:48 | 218,913,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from drf_haystack.serializers import HaystackSerializer
from rest_framework import serializers
from goods.models import GoodsCategory, GoodsChannel, SKU
from goods.search_indexes import SKUIndex
class CategorySerializer(serializers.ModelSerializer):
"""类别序列化器"""
class Meta:
model = GoodsCategory
fields = ('id','name')
class ChannelSerializer(serializers.ModelSerializer):
"""频道序列化器"""
category = CategorySerializer
class Meta:
model = GoodsChannel
fields = ('category','url')
class SKUSerializer(serializers.ModelSerializer):
"""
序列化器输出商品sku信息
"""
class Meta:
# 输出:序列化字段
model = SKU
fields = ('id','name','price','default_image_url','comments')
class SKUIndexSerializer(HaystackSerializer):
"""SKU索引结果数据序列化器"""
class Meta:
index_classes = [SKUIndex]
fields = ('text', 'id', 'name', 'price', 'default_image_url', 'comments') | [
"[email protected]"
]
| |
1979d64a1540d510194a1064ab3dd19ceaa3585b | b511bcf3b3c8724a321caa95f381956f56c81197 | /collective/wpadmin/widgets/draft.py | c1c4dd4bfba27029e4bbf9f9d56d38ede2eb8eca | []
| no_license | toutpt/collective.wpadmin | 6957f8fadd5f62a12e4b5cd3eb40794874712cea | b5f2384ff2421f1529f7f844d75c1cb4073ac959 | refs/heads/master | 2016-08-05T00:30:36.097097 | 2013-01-18T10:37:26 | 2013-01-18T10:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | from zope import component
from plone import api
from plone.registry.interfaces import IRegistry
from collective.wpadmin.widgets import widget
from collective.wpadmin import i18n
_ = i18n.messageFactory
class Draft(widget.Widget):
name = "draft"
title = _(u"Draft")
content_template_name = "draft.pt"
def get_drafts(self):
registry = component.getUtility(IRegistry)
key = 'collective.wpadmin.settings.WPAdminSettings.blog_type'
post_type = registry.get(key, 'News Item')
query = self.get_query()
query['review_state'] = 'private'
query['Creator'] = api.user.get_current().getId()
query['portal_type'] = post_type
brains = self.query_catalog(query)
return brains
| [
"[email protected]"
]
| |
00766e298a33dcae5f92d7859cc87d876ccca112 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2463/60782/304860.py | a0914fcd8b479f7c6f75f9999f2477a83b960f6a | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | """
Problem description
Given an array of integers already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function should return the two indices index1 and index2, where index1 must be less than index2.
Note:
The returned indices (index1 and index2) are not zero-based.
You may assume that each input has exactly one solution, and you may not reuse the same element.
"""
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
dic = {}
li = []
for i in range(len(numbers)):
if numbers[i] in dic.keys():
                # Append the indices of the original value and its complement to li
                li.append(dic[numbers[i]] + 1)  # index of the original value
                li.append(i + 1)  # index of the complement
                return li
            # Map the complement of each value to that value's index in the dictionary
            dic[target - numbers[i]] = i
return None
s = Solution()
print(s.twoSum(list(map(int, input().split(", "))), int(input())))
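# Fixed-input check added for illustration (the line above reads from stdin):
# print(s.twoSum([2, 7, 11, 15], 9))  # -> [1, 2], since numbers[0] + numbers[1] == 9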
| [
"[email protected]"
]
| |
a9003fdff24c89d3d9fa50bcfc64c24a0cc79586 | a24a03163cf643249922edc29bc2086517615e53 | /thewema/urls.py | 7bcf11a899a1294d7c8cbb12dff05605f0faab60 | []
| no_license | ErickMwazonga/The-Wema-Academy | 165203e8e337459f6bae4f7178b3bfad715f052a | 61f9b778e423326d8dbd2c04f2dd6ce19e15e2a9 | refs/heads/master | 2021-01-19T14:22:00.568982 | 2017-04-13T10:41:06 | 2017-04-13T10:41:06 | 88,153,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,004 | py | """wema URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
app_name = 'thewema'
urlpatterns = [
# url(r'^$', views.index_view, name='index'),
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^students$', views.StudentListView.as_view(), name='students'),
url(r'^student$', views.StudentCreateView.as_view(), name='student'),
url(r'^student/(?P<pk>[0-9]+)/$', views.StudentDetailView.as_view(), name='student_detail'),
url(r'^class$', views.StudentClassCreateView.as_view(), name='sclass'),
url(r'^classes$', views.StudentClassListView.as_view(), name='classes'),
url(r'^exam$', views.ExamCreateView.as_view(), name='exam'),
url(r'^score$', views.ScoreCreateView.as_view(), name='score'),
url(r'^scores$', views.ScoreListView.as_view(), name='scores'),
url(r'^scores/(?P<pk>[0-9]+)/$', views.ScoreDetailView.as_view(), name='score_detail'),
url(r'^feedback$', views.FeedbackCreateView.as_view(), name='feedback'),
url(r'^login$', auth_views.login, {
'template_name': 'thewema/login.html',
'authentication_form': AuthenticationForm
},
name='login'
),
url(r'^logout/$', auth_views.logout_then_login, {'login_url': 'thewema:login'}, name='logout'),
]
| [
"[email protected]"
]
| |
bd0ba877cb6b849000ce9ea154a7506ab94dbb97 | 2d735cd72f1b2a17e58397a1214d3bcc2b8f113f | /PYTHON_FUNCTIONS/any_all_in_python.py | c4e84d22e60c5fd4da0ce9f654e5655dd7651839 | []
| no_license | shubhamrocks888/python | 3b95b5b53be8e0857efe72b8797e01e959d230f4 | 7313ddd0d09a0b478df928a07a6094930b597132 | refs/heads/master | 2022-12-15T00:03:40.261942 | 2020-08-29T18:00:42 | 2020-08-29T18:00:42 | 279,280,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | Truth table :-
#                                           any                all
# All true values                           True               True
# All false values                          False              False
# One True (all others are False)           True               False
# One False (all others are True)           True               False
# Empty                                     False              True
## Any and All are two built-ins provided in Python, used for successive OR/AND operations.

'''Any'''
# Returns True if any of the items is True. It returns False if the iterable is empty
# or all items are False. Any can be thought of as a sequence of OR operations on the
# provided iterable. It short-circuits the execution, i.e. it stops as soon as the
# result is known.
# Syntax: any(iterable)
# Since all are false, false is returned
print (any([False, False, False, False])) # Output: False
# Here the method will short-circuit at the
# second item (True) and will return True.
print (any([False, True, False, False])) # Output: True
# Here the method will short-circuit at the
# first (True) and will return True.
print (any([True, False, False, False])) # Output: True
'''All'''
# Returns True if all of the items are True (or if the iterable is empty). All can be
# thought of as a sequence of AND operations on the provided iterable. It also
# short-circuits the execution, i.e. it stops as soon as the result is known.
# Syntax: all(iterable)
# Here all the iterables are True so all
# will return True and the same will be printed
print (all([True, True, True, True])) # Output: True
# Here the method will short-circuit at the
# first item (False) and will return False.
print (all([False, True, True, False])) # Output: False
# This statement will return False, as no
# True is found in the iterables
print (all([False, False, False])) # Output: False
# Practical Examples:
# This code shows how we can
# use the 'any' function on a list
list1 = []
list2 = []
# Index ranges from 1 to 10 to multiply
for i in range(1,11):
list1.append(4*i)
# Index to access the list2 is from 0 to 9
for i in range(0,10):
list2.append(list1[i]%5==0)
print('See whether at least one number is divisible by 5 in list 1=>')
print(any(list2))
# Output:
# See whether at least one number is divisible by 5 in list 1=>
# True
# Illustration of 'all' function in python 3
# Take two lists
list1=[]
list2=[]
# All numbers in list1 are in form: 4*i-3
for i in range(1,21):
list1.append(4*i-3)
# list2 stores info of odd numbers in list1
for i in range(0,20):
list2.append(list1[i]%2==1)
print('See whether all numbers in list1 are odd =>')
print(all(list2))
# Output:
# See whether all numbers in list1 are odd =>
# True
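# Additional sketch (not part of the original write-up): because any() and all()
# short-circuit, pairing them with a generator expression avoids building a full list.
nums = range(1, 21)
print(any(n % 5 == 0 for n in nums))   # True  -> stops at the first multiple of 5
print(all(n % 2 == 1 for n in nums))   # False -> stops at the first even number (2)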
| [
"[email protected]"
]
| |
c22f8acacd79b8afcf53558dbd03b826832af27a | 8580fd92512c236deae692d155bdb5eab2e00508 | /DarkTrails/asgi.py | 7b723533039a12cf02182a7076964bb2881d83f3 | []
| no_license | JackSnowdon/DownDT | d5d7f04acf92b5102cf67c5aa70cda2ebc4062fd | 17924b0b64da39d29c892fee4c7746d09b76fd8c | refs/heads/master | 2023-04-01T00:25:16.382696 | 2021-03-28T16:19:26 | 2021-03-28T16:19:26 | 352,373,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for DarkTrails project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DarkTrails.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
2bdaf389b5e48d429d1b3b05b8a493621a9a7ed6 | 144d8f2a5a5c751cebaabc73f2e2b82fa23c61c1 | /nebula_sniffer/nebula_sniffer/main.py | 6a4f42627e97abb132cf4cf0da49e18e7fe9ab3a | [
"Apache-2.0"
]
| permissive | bradbann/sniffer | f248697cf4b483a7af1e43a08d3cc6e420b21d99 | 3ef3ad5316942669f32cda7d0c96f5a8c441efc2 | refs/heads/master | 2020-04-28T04:38:00.496351 | 2019-03-11T10:56:37 | 2019-03-11T10:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,663 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import os
import logging
import traceback
import gevent
import gevent.queue
import settings
from threathunter_common.metrics.metricsrecorder import MetricsRecorder
from nebula_parser.autoparser import get_current_generators
from .urltree import URLTree
from .utils import is_linux
from .bson.objectid import ObjectId
from .msg import TextMsg, HttpMsg
from .sessionmapping import *
max_body_length_config = configcontainer.get_config("sniffer").int_item("sniffer.httpmsg.max_body",
caching=3600, default=2048)
class Main(object):
def __init__(self, id, parser, driver, cpu=None, is_process=True):
self.parser = parser
self.driver = driver
self.id = id
self._running = False
self._rpc_task = None
self._events_task = None
self._health_task = None
self.queue = gevent.queue.Queue(maxsize=10000)
self.cpu = cpu
self.is_process = is_process
self.logger = settings.init_logging("main.{}".format(self.id))
self.error_mr = MetricsRecorder("sniffer.main.error")
self.msg_mr = MetricsRecorder("sniffer.main.msg")
self.event_mr = MetricsRecorder("sniffer.main.event")
self.rpc_mr = MetricsRecorder("sniffer.main.rpc")
self.main_mr = MetricsRecorder("sniffer.main.loop")
self.urltree = URLTree()
def add_error_metrics(self, data_type):
tags = {"id": self.id, "type": data_type}
self.error_mr.record(1, tags)
def start(self):
if self._running:
return
self.main_mr.record(1, {"id": self.id, "type": "start"})
# cpu binding
self.logger.info("process %s binding to cpu %s", os.getpid(), self.cpu)
if is_linux() and self.cpu and self.is_process:
            # taskset is a command for viewing and setting CPU core usage; it can launch a command with its CPU core affinity set directly.
# self.cpu = 1
subprocess.Popen(["taskset", "-cp", "{}".format(self.cpu), "{}".format(os.getpid())],
stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()
self._running = True
self.logger.info("sniffer instance is starting driver")
if self.driver:
self.driver.start()
self.logger.info("sniffer instance is starting rpc task")
self._rpc_task = gevent.spawn(self.rpc_processor)
self._rpc_task.start()
# parse event for httpmsg
self.logger.info("sniffer instance is starting events task")
self._events_task = gevent.spawn(self.event_processor)
self._events_task.start()
self.logger.info("sniffer instance is starting healthy task")
self._health_task = gevent.spawn(self.health_processor)
self._health_task.start()
self.urltree.synchronize()
def stop(self):
self._running = False
self.logger.info("sniffer instance is stopping rpc task")
self.main_mr.record(1, {"id": self.id, "type": "stop"})
if self._rpc_task:
self._rpc_task.kill()
self.logger.info("sniffer instance is stopping events task")
if self._events_task:
self._events_task.kill()
self.logger.info("sniffer instance is stopping healthy task")
if self._health_task:
self._health_task.kill()
self.logger.info("sniffer instance is stopping driver")
if self.driver:
self.driver.stop()
def close(self):
self.stop()
def __del__(self):
self.stop()
def event_processor(self):
idle_run = 0
while self._running:
# no events coming
if idle_run > 0 and idle_run % 5 == 0:
# idle sleep for 0.5 seconds
gevent.sleep(0.5)
if idle_run % 100 == 0:
self.logger.debug("no msg in the last short time")
self.main_mr.record(1, {"id": self.id, "type": "idle"})
try:
msg = self.driver.get_msg_nowait()
except Exception as ex:
# no msg yet
msg = None
if not msg:
idle_run += 1
continue
else:
idle_run = 0
# msg common processing
try:
self.msg_mr.record(1, {"id": self.id, "type": "input"})
self.logger.debug("start to process msg %s", msg)
                # start URL pattern (bones) folding
self.urltree.synchronize()
uri_stem = msg.uri_stem
page = msg.page
if msg.is_static:
                    # special-case logic for static pages
new_url = msg.host + '/****.' + msg.page.rsplit('.', 1)[-1]
msg.uri_stem = msg.page = new_url
elif page == uri_stem:
# no normalization yet
new_page, new_params = self.urltree.normalize_url(page)
if new_page != page:
msg.uri_stem = new_page
msg.page = new_page
new_params = '&'.join(['%s=%s' % (k, v) for k, v in new_params.iteritems()])
old_params = msg.uri_query
if old_params:
new_params = old_params + '&' + new_params
msg.uri_query = new_params
# msg specific processing per customer
if self.parser.filter(msg):
self.logger.debug("filtered by customparsers")
self.msg_mr.record(1, {"id": self.id, "type": "drop"})
continue
self.logger.debug("msg has passed the filter")
events = []
if isinstance(msg, HttpMsg):
                    # the actual parse entry point: process the http message and return a list of events
events = self.parser.get_events_from_http_msg(msg)
elif isinstance(msg, TextMsg):
events = self.parser.get_events_from_text_msg(msg)
else:
self.logger.error("fail to process this type of event")
self.add_error_metrics("parse failure")
continue
http_events = [e for e in events if e.name in {"HTTP_DYNAMIC", "HTTP_STATIC"}]
if not http_events:
continue
                # take the first one because the first handler in every customer processing module is extract_http_log_event()
http_event = http_events[0]
# try autoparsers
for g in get_current_generators():
result = g.parse_event(http_event, msg)
if result:
events.append(result)
if not events:
continue
self.logger.debug("msg has generated %d events", len(events))
self.msg_mr.record(1, {"id": self.id, "type": "output"})
self.event_mr.record(len(events), {"id": self.id, "type": "input"})
# this is an ugly version, need a totally new one
# processing id and pid
httpid = "0" * 24
for ev in events:
if ev.name in {"HTTP_DYNAMIC", "HTTP_STATIC"}:
ev.property_values["pid"] = "0" * 24
httpid = ev.property_values["id"]
for ev in events:
if ev.name not in {"HTTP_DYNAMIC", "HTTP_STATIC"}:
ev.property_values["id"] = str(ObjectId())
ev.property_values["pid"] = httpid
# "processing uid/did/sid"
id_dict = {
"uid": "",
"did": "",
"sid": "",
}
for ev in events:
for key in id_dict.keys():
if ev.property_values.get(key):
id_dict[key] = ev.property_values[key]
if ev.name == "ACCOUNT_LOGIN":
id_dict["uid"] = ev.property_values["user_name"]
store_user_session_mapping(id_dict["uid"], id_dict["sid"])
if ev.name == "ACCOUNT_REGISTRATION":
id_dict["uid"] = ev.property_values["user_name"]
store_user_session_mapping(id_dict["uid"], id_dict["sid"])
if not id_dict["uid"] or id_dict["uid"].startswith("fake"):
t = get_user_from_session(id_dict["sid"])
if t:
id_dict["uid"] = t
self.logger.debug("get id for this batch of events %s", id_dict)
for ev in events:
ev.property_values.update(id_dict)
_max_length = max_body_length_config.get()
for ev in events:
# body should not be too long
if "s_body" in ev.property_values:
ev.property_values["s_body"] = ev.property_values["s_body"][:_max_length]
if "c_body" in ev.property_values:
ev.property_values["c_body"] = ev.property_values["c_body"][:_max_length]
# end of the ugly code
for ev in events:
self.logger.debug("get event %s", ev)
self.queue.put_nowait(ev)
self.event_mr.record(len(events), {"id": self.id, "type": "output"})
except:
# todo add metrics
self.add_error_metrics("main process failure")
self.msg_mr.record(1, {"id": self.id, "type": "drop"})
self.logger.error("fail to process, error %s", traceback.format_exc())
def health_processor(self):
while self._running:
if self.driver and not self.driver.is_alive():
self._running = False
gevent.sleep(5)
def rpc_processor(self):
mode = configcontainer.get_config("sniffer").get_string("sniffer.servicemode", "redis")
if mode == "redis":
import redisserviceclient
http_client = redisserviceclient.get_httplog_rpc_client()
misc_client = redisserviceclient.get_misclog_rpc_client()
elif mode == "rabbitmq":
import rabbitmqserviceclient
amqp_url = configcontainer.get_config("sniffer").get_string("sniffer.amqp_url", "")
http_client = rabbitmqserviceclient.get_httplog_rpc_client(amqp_url)
misc_client = rabbitmqserviceclient.get_misclog_rpc_client(amqp_url)
else:
self.add_error_metrics("invalid service")
raise RuntimeError("invalid service mode")
http_client.start()
misc_client.start()
idle_run = 0
events_sent = 0
r = 0
event = None
while self._running:
r += 1
try:
events_sent = 0
event = self.queue.get_nowait()
self.rpc_mr.record(1, {"id": self.id, "type": "input", "mode": mode, "name": event.name})
if event.name == "HTTP_DYNAMIC" or event.name == "HTTP_STATIC":
if event.property_values["is_static"]:
# remove redundant values
event.property_values["s_body"] = ""
event.property_values["c_body"] = ""
event.property_values["cookie"] = ""
event.key = event.property_values["c_ip"]
http_client.send(event, event.key, False)
self.logger.debug("sending an http event on key %s", event.key)
self.rpc_mr.record(1, {"id": self.id, "type": "output", "mode": mode, "name": event.name})
else:
misc_client.send(event, event.key, False)
self.logger.debug("sending an %s event on key %s", event.name, event.key)
self.rpc_mr.record(1, {"id": self.id, "type": "output", "mode": mode, "name": event.name})
events_sent = 1
event = None
except gevent.queue.Empty:
pass
except Exception as err:
import traceback
traceback.print_exc()
self.add_error_metrics("send event")
self.rpc_mr.record(1, {"id": self.id, "type": "error", "mode": mode,
"name": event.name if event else ""})
self.logger.error("fail to send event, error %s", err)
finally:
# sleep while idle
if not events_sent:
idle_run += 1
idle_run = min(idle_run, 5)
gevent.sleep(0.1 * idle_run)
else:
idle_run = 0
| [
"[email protected]"
]
| |
8b23a3fffb6859b0622210f0f50699c660b3ef3f | 50ee2f4f1a7d2e5ff7ac35118c5ac45f9b923865 | /0x01-python-if_else_loops_functions/1-last_digit.py | c7b28ae9d733661962aa47ddbb2e987589ebc1b4 | []
| no_license | spencerhcheng/holbertonschool-higher_level_programming | b489fbe8eba6109ef1eaa0d9363f3477e7eb16c4 | f8e1dbc24fcf8fb40ca135d2700872eb773e481e | refs/heads/master | 2021-01-20T06:54:35.044899 | 2018-05-20T05:09:59 | 2018-05-20T05:09:59 | 89,943,332 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number > 0:
lastNum = number % 10
elif number <= 0:
lastNum = number % -10
print('Last digit of {:d} is {:d}'. format(number, lastNum), end=" ")
if lastNum > 5:
print('and is greater than 5')
elif lastNum == 0:
print('and is 0')
elif lastNum < 6:
print('and is less than 6 and not 0')
| [
"[email protected]"
]
| |
6aaadd38872c563c7e3b4fd9a31a6d2edfb79945 | 41b73ecc4fa00a58609c1c3b8e717bbbc13cdee6 | /test/test_all.py | d7bd3837fc94c5de55e932b9801ad5547ef409f3 | []
| no_license | ahwillia/sinkdiv | 70c2f689af43cf80dd8c3951199885f3792d9ac3 | 85bd51f369855b78e5c0e1d5bb2aa8928d85c428 | refs/heads/master | 2023-01-31T10:56:08.481608 | 2020-12-18T04:41:26 | 2020-12-18T04:41:26 | 298,928,192 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sinkdiv import OTCost, ForwardKL, Balanced
from scipy.optimize import approx_fprime
def test_entropy_increases(make_fig=False):
"""
Check that increasing epsilon increases blur in the
transport plan.
"""
epsilons = (0.01, 0.1, 1.0)
margdiv = ForwardKL(1.0)
x = np.linspace(-4, 4, 51)[:, None]
y = np.linspace(-4, 4, 50)[:, None]
a = np.squeeze(np.exp(-x ** 2))
b = np.squeeze(np.exp(-y ** 2))
a /= np.sum(a)
b /= np.sum(b)
# Fit transport plans.
plans = []
for eps in epsilons:
plans.append(
OTCost(margdiv, eps, 1e-6).fit(a, x, b, y).P_
)
# Test that the entropy of the optimal plan increases.
entropies = [np.sum(-P * np.log(P + 1e-10) - P + 1) for P in plans]
assert np.all(np.diff(entropies) > 0)
if make_fig:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 3, sharey=True, sharex=True)
for P, eps, ax in zip(plans, epsilons, axes):
ax.imshow(P, aspect="auto")
ax.set_title("eps = {}".format(eps))
fig.set_size_inches((4, 2))
fig.tight_layout()
plt.show()
# @pytest.mark.parametrize('eps', [0.01, 0.1, 1.0])
# @pytest.mark.parametrize('tol', [1e-6])
# def test_balanced_duality_gap(eps, tol):
# """
# Check agreement between primal and dual objectives,
# balanced transport case.
# """
# np.random.seed(1234)
# margdiv = Balanced()
# x = np.linspace(-4, 4, 51)[:, None]
# y = np.linspace(-4, 4, 50)[:, None]
# a = np.squeeze(np.exp(-x ** 2))
# b = np.squeeze(np.exp(-y ** 2))
# a /= a.sum()
# b /= b.sum()
# ot = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# assert_allclose(ot.primal_obj_, ot.dual_obj_, atol=1e-3)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('eps', [1.0])
@pytest.mark.parametrize('lam', [1000]) # <-- !! currently works for large lam, but not small !!
@pytest.mark.parametrize('b_mass', [1.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_reference_implementation(seed, eps, lam, b_mass, tol):
"""
Compare transport plan to Python Optimal Transpot (POT)
library.
"""
from ot.unbalanced import sinkhorn_stabilized_unbalanced
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (b_mass / b.sum())
# Fit OTCost, get transport plan
margdiv = ForwardKL(lam)
otcost = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# Fit with reference library.
transport_plan = sinkhorn_stabilized_unbalanced(
a, b, otcost.C_, eps, lam, numItermax=10000
)
# Assert optimal transport plans match.
assert_allclose(otcost.P_, transport_plan, atol=1e-5, rtol=1e-2)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('tol', [1e-6])
@pytest.mark.parametrize('eps', [1e-6])
def test_zero_cost(seed, eps, tol):
"""
Assert cost is zero if epsilon and lambda penalties are both very small.
In this case, an optimal transport plan could just be the zeros matrix.
"""
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (1.0 / b.sum())
# Fit model with very small marginal penalty
margdiv = ForwardKL(1e-6)
otcost = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# Assert cost is essentially zero.
assert_allclose(otcost.primal_obj_, 0.0, atol=1e-5)
assert_allclose(otcost.dual_obj_, 0.0, atol=1e-5)
@pytest.mark.parametrize('seed', [123])
@pytest.mark.parametrize('eps', [0.1, 1.0, 10])
@pytest.mark.parametrize('lam', [0.1, 1.0, 10])
@pytest.mark.parametrize('b_mass', [0.5, 1.0, 2.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_unbalanced_kl_duality_gap(seed, eps, lam, b_mass, tol):
"""
Compare transport plan to Python Optimal Transpot (POT)
library.
"""
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (b_mass / b.sum())
# Calculate OT cost.
margdiv = ForwardKL(lam)
otcost = OTCost(margdiv, eps, tol).fit(a, x, b, y)
# Duality gap should be small.
assert_allclose(otcost.primal_obj_, otcost.dual_obj_, atol=1e-4)
@pytest.mark.parametrize('seed', [123, 1234])
@pytest.mark.parametrize('eps', [0.1, 1.0, 10])
@pytest.mark.parametrize('lam', [0.1, 1.0, 10])
@pytest.mark.parametrize('b_mass', [0.5, 1.0, 2.0])
@pytest.mark.parametrize('tol', [1e-6])
def test_ot_kl_gradients(seed, eps, lam, b_mass, tol):
"""
Compare transport plan to Python Optimal Transpot (POT)
library.
"""
rs = np.random.RandomState(seed)
# Random locations for atoms.
x = rs.randn(25, 1)
y = rs.randn(24, 1)
# Random mass vectors.
a = np.random.rand(x.size)
b = np.random.rand(y.size)
# Normalize masses.
a *= (1.0 / a.sum())
b *= (b_mass / b.sum())
# Calculate OT cost.
margdiv = ForwardKL(lam)
otcost = OTCost(margdiv, eps, tol)
# Fit OT cost, compute gradients for a and b.
otcost.fit(a, x, b, y)
grad_a = otcost.grad_a_.copy()
grad_b = otcost.grad_b_.copy()
# Compute gradient of a by finite differencing.
def f(a_):
otcost.fit(a_, x, b, y)
return otcost.primal_obj_
approx_grad_a = approx_fprime(a, f, np.sqrt(np.finfo(float).eps))
# Check gradients approximately match finite differencing.
assert_allclose(grad_a, approx_grad_a, atol=1e-4, rtol=1e-3)
# Function to compute otcost given mass vector b.
def g(b_):
otcost.fit(a, x, b_, y)
return otcost.primal_obj_
approx_grad_b = approx_fprime(b, g, np.sqrt(np.finfo(float).eps))
# Check gradients approximately match finite differencing.
assert_allclose(grad_b, approx_grad_b, atol=1e-4, rtol=1e-3)
| [
"[email protected]"
]
| |
6aca78d446a771d1bdc8bb31bbbc2bb778bacfba | 206c10808b6224f7d8236e27cc555e723af695d9 | /tests/test_empty_service.py | 8ab14bce925b0271890c48c84c359ad361d40e51 | [
"MIT"
]
| permissive | xdmiodz/tomodachi | 3280209ae49100ec902e3b15c323b38e7480cdd3 | 7ca998a421dd724df5967d5baa0cf79f5112b79b | refs/heads/master | 2023-03-15T19:22:16.381212 | 2023-01-20T07:34:48 | 2023-01-20T07:34:48 | 200,020,833 | 0 | 2 | MIT | 2023-03-08T00:00:01 | 2019-08-01T09:30:22 | Python | UTF-8 | Python | false | false | 674 | py | from typing import Any
from run_test_service_helper import start_service
def test_empty_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/empty_service.py", monkeypatch)
loop.run_until_complete(future)
out, err = capsys.readouterr()
assert "No transports defined in service file" in err
def test_non_decorated_service(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service("tests/services/non_decorated_service.py", monkeypatch)
loop.run_until_complete(future)
out, err = capsys.readouterr()
assert "No transports defined in service file" in err
| [
"[email protected]"
]
| |
cfb9ff1a1089622084ea929a8ceebf87da9d0687 | 45799ccc3a16c785ab3c65f3296d66f8463590dc | /docs/_downloads/b9951f29cd54bc08237c8fb75b9c2476/q1314.py | b487939c8e11b9a0513ff9639257664f5e82d07a | [
"MIT"
]
| permissive | odys-z/hello | 9d29b7af68ea8c490b43994cf16d75c0e8ace08e | fedd0aec7273f3170aa77316d0d5f317cc18a979 | refs/heads/master | 2023-08-19T03:25:58.684050 | 2023-08-18T08:07:27 | 2023-08-18T08:07:27 | 154,006,292 | 0 | 0 | MIT | 2023-04-18T22:50:56 | 2018-10-21T12:34:12 | C++ | UTF-8 | Python | false | false | 2,347 | py | '''
1314. Matrix Block Sum
https://leetcode.com/problems/matrix-block-sum/
Given a m * n matrix mat and an integer K, return a matrix answer where each answer[i][j] is
the sum of all elements mat[r][c] for i - K <= r <= i + K, j - K <= c <= j + K, and (r, c)
is a valid position in the matrix.
Example 1:
Input: mat = [[1,2,3],[4,5,6],[7,8,9]], K = 1
Output: [[12,21,16],[27,45,33],[24,39,28]]
Example 2:
Input: mat = [[1,2,3],[4,5,6],[7,8,9]], K = 2
Output: [[45,45,45],[45,45,45],[45,45,45]]
Constraints:
m == mat.length
n == mat[i].length
1 <= m, n, K <= 100
1 <= mat[i][j] <= 100
Hint 1:
How to calculate the required sum for a cell (i,j) fast ?
Hint 2:
Use the concept of cumulative sum array.
Hint 3:
Create a cumulative sum matrix where dp[i][j] is the sum of all cells in the rectangle
from (0,0) to (i,j), use inclusion-exclusion idea.
'''
from unittest import TestCase
from typing import List
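# A minimal sketch (hypothetical helper, not part of the original solution) of
# the cumulative-sum / inclusion-exclusion idea described in the hints above:
# pre[i+1][j+1] holds the sum of the rectangle from (0, 0) to (i, j).
def _prefix_sum_demo(mat: List[List[int]]) -> List[List[int]]:
    m, n = len(mat), len(mat[0])
    pre = [[0] * (n + 1) for _ in range(m + 1)]
    for r in range(m):
        for c in range(n):
            # add the two partial rectangles and subtract their overlap
            pre[r + 1][c + 1] = mat[r][c] + pre[r][c + 1] + pre[r + 1][c] - pre[r][c]
    return pre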
class Solution:
'''
70.85%
'''
def matrixBlockSum(self, mat: List[List[int]], K: int) -> List[List[int]]:
# dp
m, n = len(mat), len(mat[0])
dp = [[0] * (n+K) for _ in range(m+K)]
for r in range(m):
dp[r][0] = mat[r][0]
for c in range(1, n+K):
if c < n:
dp[r][c] = mat[r][c] + dp[r][c-1]
else:
dp[r][c] = dp[r][c-1]
for c in range(n+K):
for r in range(1, m+K):
if r < m:
dp[r][c] += dp[r-1][c]
else:
dp[r][c] = dp[r-1][c]
for r in range(m):
for c in range(n):
mat[r][c] = dp[r+K][c+K]
if 0 <= r - K - 1:
mat[r][c] -= dp[r-K-1][c+K]
if 0 <= c - K - 1:
mat[r][c] -= dp[r+K][c-K-1]
if 0 <= r - K - 1 and 0 <= c - K - 1:
mat[r][c] += dp[r-K-1][c-K-1]
return mat
if __name__ == '__main__':
t = TestCase()
s = Solution()
t.assertCountEqual([[12,21,16],[27,45,33],[24,39,28]],
s.matrixBlockSum([[1,2,3],[4,5,6],[7,8,9]], 1))
t.assertCountEqual([[45,45,45],[45,45,45],[45,45,45]],
s.matrixBlockSum([[1,2,3],[4,5,6],[7,8,9]], 2))
print("OK!") | [
"[email protected]"
]
| |
17fe19b4e80f15be0aa96d6afc0197167630396f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Yfksxs7kyJf6B3yvK_21.py | 3d96e93dc0ddaedcb2d4e9ec9ecf8a4618a5d7cd | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | """
Given a list of integers, return the smallest _positive_ integer _not present
in the list_.
Here is a representative example. Consider the list:
[-2, 6, 4, 5, 7, -1, 7, 1, 3, 6, 6, -2, 9, 10, 2, 2]
After reordering, the list becomes:
[-2, -2, -1, 1, 2, 2, 3, 4, 5, 6, 6, 6, 7, 7, 9, 10]
... from which we see that the smallest missing positive integer is `8`.
### Examples
min_miss_pos([-2, 6, 4, 5, 7, -1, 1, 3, 6, -2, 9, 10, 2, 2]) ➞ 8
# After sorting, list becomes [-2, -2, -1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 9, 10]
# So the smallest missing positive integer is 8
min_miss_pos([5, 9, -2, 0, 1, 3, 9, 3, 8, 9]) ➞ 2
# After sorting, list becomes [-2, 0, 1, 3, 3, 5, 8, 9, 9, 9]
# So the smallest missing positive integer is 2
min_miss_pos([0, 4, 4, -1, 9, 4, 5, 2, 10, 7, 6, 3, 10, 9]) ➞ 1
# After sorting, list becomes [-1, 0, 2, 3, 4, 4, 4, 5, 6, 7, 9, 9, 10, 10]
# So the smallest missing positive integer is 1
### Notes
For the sake of clarity, recall that `0` is not considered to be a positive
number.
"""
def min_miss_pos(lst):
for i in range(1, 2<<64): # huge range instead of "while" or itertools.count
if i not in lst:
return i
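if __name__ == '__main__':
    # Quick checks mirroring the docstring examples above (added for
    # illustration, not part of the original solution).
    assert min_miss_pos([5, 9, -2, 0, 1, 3, 9, 3, 8, 9]) == 2
    assert min_miss_pos([0, 4, 4, -1, 9, 4, 5, 2, 10, 7, 6, 3, 10, 9]) == 1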
| [
"[email protected]"
]
| |
ce23796651ea87049745a818cb08caafa35cc580 | 9eef3e4cf39a659268694cf08a4a799af8fb13e2 | /packages/dpdprops/dpdprops/__init__.py | c42c51871769928dd028add49df137aafa25b487 | []
| no_license | cselab/tRBC-UQ | c30ec370939b949c989d2e9cd30137073b53e7d2 | cd7711b76c76e86bc6382914111f4fa42aa78f2c | refs/heads/master | 2023-04-18T03:06:49.175259 | 2022-10-25T15:45:07 | 2022-10-25T15:45:07 | 483,407,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from .fluid import *
from .dpdparams import (DPDParams,
create_dpd_params_from_str,
create_dpd_params_from_Re_Ma,
create_dpd_params_from_props)
from .membrane import *
from .membraneparams import (MembraneParams,
KantorParams,
JuelicherParams,
WLCParams,
LimParams,
DefaultRBCParams,
KantorWLCRBCDefaultParams,
JuelicherLimRBCDefaultParams)
from .membraneforces import (extract_dihedrals,
compute_kantor_energy,
compute_juelicher_energy)
from .fsi import (get_gamma_fsi_DPD_membrane,
create_fsi_dpd_params)
from .rbcmesh import (load_stress_free_mesh,
load_equilibrium_mesh)
| [
"[email protected]"
]
| |
e9fb301ce413574e49d9b5dab04e7840eb52ae8b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnetonian.py | 43b62a44c0b174541904a814c37cf4917415a758 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 62 | py | ii = [('ClarGE3.py', 2), ('DibdTRL.py', 1), ('DibdTBR.py', 1)] | [
"[email protected]"
]
| |
ceed057825798d46c509ddab61ac189ced30ad29 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-oms/setup.py | b81b7515b7d134fa9438170ce81a39929b9463d6 | [
"Apache-2.0"
]
| permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # coding: utf-8
from os import path
from setuptools import setup, find_packages
NAME = "huaweicloudsdkoms"
VERSION = "3.0.52"
AUTHOR = "HuaweiCloud SDK"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/huaweicloud/huaweicloud-sdk-python-v3"
DESCRIPTION = "OMS"
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_PYPI.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
REQUIRES = ["huaweicloudsdkcore"]
OPTIONS = {
'bdist_wheel': {
'universal': True
}
}
setup(
name=NAME,
version=VERSION,
options=OPTIONS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache LICENSE 2.0",
url=URL,
keywords=["huaweicloud", "sdk", "OMS"],
packages=find_packages(exclude=["tests*"]),
install_requires=REQUIRES,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*",
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development'
]
)
| [
"[email protected]"
]
| |
d5ddd74869a157b83c40a72dcab563c596578394 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /T083_求用0—7所能组成的奇数个数.py | 0fb0007220933911a99ceca79ed911aaae9783bb | []
| no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # 在这里写上你的代码 :-)
'''
Problem 083: count the odd numbers that can be formed from the digits 0-7.
'''
def tm083():
    '''
    [Personal note]: the problem does not say how many digits to use or whether
    digits may be reused. Assume lengths 1-8 are all allowed and digits may not
    be repeated. Simply use the permutation function, accumulate the results
    and de-duplicate them to get the answer.
    '''
s = [i for i in '01234567']
    import itertools  # provides the permutation and combination functions
arr = []
for i in range(1,9):
        a = list(itertools.permutations(s,i)) # permutations of length 1 to 8
        l = list(map(lambda x:int(''.join(x)),a)) # convert to integers (so that e.g. '02' is treated as the number 2)
arr+=l
print(i,len(l))
    arr1 = set(arr) # remove duplicates
    arr2 = list(filter(lambda x:x%2==1,arr1)) # keep only the odd numbers
    print(len(arr),len(arr1),len(arr2)) # the answer is 46972
tm083()
| [
"[email protected]"
]
| |
6e27170626bd5d4c4cb409cc4fe8e7ed80e75715 | dc9f2638209a9be235a1c4acc44fe2a26256c4b4 | /venv/projects/lib/python3.8/site-packages/pip/_vendor/chardet/mbcharsetprober.py | f875974d3c29050ff39044e0bf631df473d0e087 | []
| no_license | alwinruby/RealWorld | 4f5fcaed68fdd2d9fc37f5973fec365195cb3e9e | ec446f96f3545cb847429b5e33cefdc4f00ce432 | refs/heads/main | 2023-08-13T10:28:40.528047 | 2021-10-10T14:58:23 | 2021-10-10T14:58:23 | 408,079,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
class MultiByteCharSetProber(CharSetProber):
"""
MultiByteCharSetProber
"""
def __init__(self, lang_filter=None):
super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
self.distribution_analyzer = None
self.coding_sm = None
self._last_char = [0, 0]
def reset(self):
super(MultiByteCharSetProber, self).reset()
if self.coding_sm:
self.coding_sm.reset()
if self.distribution_analyzer:
self.distribution_analyzer.reset()
self._last_char = [0, 0]
@property
def charset_name(self):
raise NotImplementedError
@property
def language(self):
raise NotImplementedError
def feed(self, byte_str):
for i in range(len(byte_str)):
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s',
self.charset_name, self.language, i)
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING:
if (self.distribution_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
return self.distribution_analyzer.get_confidence()
| [
"[email protected]"
]
| |
95b09bf9b3e4db89414199c59be246b83df7e9f0 | 835881ade89eaff933f81d186e69fcf9695d9392 | /bolero/utils/setup.py | dcce793f7c39de9bdf163a6985c1d62c94056aed | [
"BSD-3-Clause"
]
| permissive | MMKrell/bolero | 9e056a88aa89332762c0f06d4f8e43fc4ac64018 | 0e011de35f2b364bb3bb7509bc38491762026643 | refs/heads/master | 2021-01-21T15:19:20.012273 | 2017-05-19T13:38:47 | 2017-05-19T13:38:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration("utils", parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
| [
"[email protected]"
]
| |
eff44ce1869cc6d0c340bdadc54f92b6e8ba7f01 | 817f6b5a69b53599589b798b94efecd8ed1d8e17 | /exercises/1901100282/d07/mymodule/main.py | 2eb3cc635245118bc6dace1675a2ec08d1d02312 | []
| no_license | oneisonly/selfteaching-python-camp | 2422a16c0c9efe787f18fa48833b0bdc8e245982 | 2f26872d31c7392f9530ee1aa7be7958109aaec3 | refs/heads/master | 2020-07-04T23:17:39.750702 | 2019-12-03T04:38:43 | 2019-12-03T04:38:43 | 202,078,442 | 0 | 0 | null | 2019-08-13T06:24:02 | 2019-08-13T06:24:01 | null | UTF-8 | Python | false | false | 3,999 | py | text = '''
愚公移⼭山
太⾏行行,王屋⼆二⼭山的北北⾯面,住了了⼀一個九⼗十歲的⽼老老翁,名叫愚公。⼆二⼭山佔地廣闊,擋住去路路,使他
和家⼈人往來來極為不不便便。
⼀一天,愚公召集家⼈人說:「讓我們各盡其⼒力力,剷平⼆二⼭山,開條道路路,直通豫州,你們認為怎
樣?」
⼤大家都異異⼝口同聲贊成,只有他的妻⼦子表示懷疑,並說:「你連開鑿⼀一個⼩小丘的⼒力力量量都沒有,怎
可能剷平太⾏行行、王屋⼆二⼭山呢?況且,鑿出的⼟土⽯石⼜又丟到哪裏去呢?」
⼤大家都熱烈烈地說:「把⼟土⽯石丟進渤海海裏。」
於是愚公就和兒孫,⼀一起開挖⼟土,把⼟土⽯石搬運到渤海海去。
愚公的鄰居是個寡婦,有個兒⼦子⼋八歲也興致勃勃地⾛走來來幫忙。
寒來來暑往,他們要⼀一年年才能往返渤海海⼀一次。
住在⿈黃河河畔的智叟,看⾒見見他們這樣⾟辛苦,取笑愚公說:「你不不是很愚蠢嗎?你已⼀一把年年紀
了了,就是⽤用盡你的氣⼒力力,也不不能挖去⼭山的⼀一⻆角呢?」
愚公歎息道:「你有這樣的成⾒見見,是不不會明⽩白的。你⽐比那寡婦的⼩小兒⼦子還不不如呢!就算我死
了了,還有我的兒⼦子,我的孫⼦子,我的曾孫⼦子,他們⼀一直傳下去。⽽而這⼆二⼭山是不不會加⼤大的,總有
⼀一天,我們會把它們剷平。」
智叟聽了了,無話可說:
⼆二⼭山的守護神被愚公的堅毅精神嚇倒,便便把此事奏知天帝。天帝佩服愚公的精神,就命兩位⼤大
⼒力力神揹⾛走⼆二⼭山。
How The Foolish Old Man Moved Mountains
Yugong was a ninety-year-old man who lived at the north of two high
mountains, Mount Taixing and Mount Wangwu.
Stretching over a wide expanse of land, the mountains blocked
yugong’s way making it inconvenient for him and his family to get
around.
One day yugong gathered his family together and said,”Let’s do our
best to level these two mountains. We shall open a road that leads
to Yuzhou. What do you think?”
All but his wife agreed with him.
“You don’t have the strength to cut even a small mound,” muttered
his wife. “How on earth do you suppose you can level Mount Taixin
and Mount Wanwu? Moreover, where will all the earth and rubble go?”
“Dump them into the Sea of Bohai!” said everyone.
So Yugong, his sons, and his grandsons started to break up rocks and
remove the earth. They transported the earth and rubble to the Sea
of Bohai.
Now Yugong’s neighbour was a widow who had an only child eight years
old. Evening the young boy offered his help eagerly.
Summer went by and winter came. It took Yugong and his crew a full
year to travel back and forth once.
On the bank of the Yellow River dwelled an old man much respected
for his wisdom. When he saw their back-breaking labour, he ridiculed
Yugong saying,”Aren’t you foolish, my friend? You are very old now,
and with whatever remains of your waning strength, you won’t be able
to remove even a corner of the mountain.”
Yugong uttered a sigh and said,”A biased person like you will never
understand. You can’t even compare with the widow’s little boy!”
“Even if I were dead, there will still be my children, my
grandchildren, my great grandchildren, my great great grandchildren.
They descendants will go on forever. But these mountains will not
grow any taler. We shall level them one day!” he declared with
confidence.
The wise old man was totally silenced.
When the guardian gods of the mountains saw how determined Yugong
and his crew were, they were struck with fear and reported the
incident to the Emperor of Heavens.
Filled with admiration for Yugong, the Emperor of Heavens ordered
two mighty gods to carry the mountains away.
'''
import stats_word
stats_word.stats_word(text)
| [
"[email protected]"
]
| |
aae01e5ea480127d1b556c6aea6273ee7d32d993 | cccf8da8d41ae2c14f5f4313c1edcf03a27956bb | /python/python2latex/writeLTXtextnormal.py | 2f9ea1c10c926e3827f58c7bf4835b22cb57fa58 | []
| no_license | LucaDiStasio/transpilers | e8f8ac4d99be3b42a050148ca8fbc5d025b83290 | c55d4f5240083ffd512f76cd1d39cff1016909b8 | refs/heads/master | 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,293 | py | # Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXtextnormal(filepath=None,args=None,options=None,*args,**kwargs):
varargin = writeLTXtextnormal.varargin
nargin = writeLTXtextnormal.nargin
##
#==============================================================================
# Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
# Author: Luca Di Stasio <[email protected]>
# <[email protected]>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# Neither the name of the Universite de Lorraine or Lulea tekniska universitet
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# DESCRIPTION
#
# A function to create a Latex file.
# Sets normal font. SeeText Formatting.#
##
fileId=fopen(filepath,'a')
fprintf(fileId,'\\n')
line='\\textnormal'
if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
line=strcat(line,'[',options,']')
if logical_not(isempty(args)):
line=strcat(line,'{')
for i in arange(1,length(args)).reshape(-1):
dims=size(args)
if dims[1] == 1 and dims[2] == 1:
line=strcat(line,args[i])
else:
if dims[1] > 1 and dims[2] == 1:
try:
line=strcat(line,args[i][1])
finally:
pass
else:
if dims[1] == 1 and dims[2] > 1:
try:
line=strcat(line,args[1][i])
finally:
pass
else:
line=strcat(line,args[i])
line=strcat(line,'}')
fprintf(fileId,strcat(line,'\\n'))
fclose(fileId)
return | [
"[email protected]"
]
| |
4a60de6be31da7bf31c87e44c1819edbb0b124a0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_040/ch20_2020_03_05_18_36_09_760355.py | f61391c93f57ceb3a39f6885c928eb85d74c21f9 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | distancia=float(input("Qual distância você deseja percorrer: "))
if (distancia<=200):
print ("R$",(distancia:.2f*0.5))
else:
print ("R$",(200*0.5+(distancia:.2f-200)*0.45)) | [
"[email protected]"
]
| |
5b1b804ba412f88488a66775b1cd8af3b8f2a81e | 517d461257edd1d6b239200b931c6c001b99f6da | /Circuit_Playground/CircuitPython/Data_Logging/typing/typing_original_.py | 5b9aa66386ae0b84741b00930ee46fc0dee033a7 | []
| no_license | cmontalvo251/Microcontrollers | 7911e173badff93fc29e52fbdce287aab1314608 | 09ff976f2ee042b9182fb5a732978225561d151a | refs/heads/master | 2023-06-23T16:35:51.940859 | 2023-06-16T19:29:30 | 2023-06-16T19:29:30 | 229,314,291 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | # Circuit Playground Express Data Time/Light Intensity/Temp
# Log data to a spreadsheet on-screen
# Open Spreadsheet beforehand and position to start (A,1)
# Use slide switch to start and stop sensor readings
# Time values are seconds since board powered on (relative time)
import time
from digitalio import DigitalInOut, Direction, Pull
import analogio
import board
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import adafruit_thermistor
# Switch to quickly enable/disable
switch = DigitalInOut(board.SLIDE_SWITCH)
switch.pull = Pull.UP
# light level
light = analogio.AnalogIn(board.LIGHT)
# temperature
thermistor = adafruit_thermistor.Thermistor(board.TEMPERATURE, 10000,
10000, 25, 3950)
# Set the keyboard object!
# Sleep for a bit to avoid a race condition on some systems
time.sleep(1)
kbd = Keyboard()
layout = KeyboardLayoutUS(kbd) # US is only current option...
led = DigitalInOut(board.D13) # Set up red LED "D13"
led.direction = Direction.OUTPUT
print("Time\tLight\tTemperature") # Print column headers
def slow_write(string): # Typing should not be too fast for
for c in string: # the computer to be able to accept
layout.write(c)
time.sleep(0.2) # use 1/5 second pause between characters
while True:
if switch.value: # If the slide switch is on, don't log
continue
# Turn on the LED to show we're logging
led.value = True
temp = thermistor.temperature # In Celsius
# if you want Fahrenheit, uncomment the line below
# temp = temp * 9 / 5 + 32
# Format data into value 'output'
output = "%0.1f\t%d\t%0.1f" % (time.monotonic(), light.value, temp)
print(output) # Print to serial monitor
slow_write(output) # Print to spreadsheet
kbd.press(Keycode.DOWN_ARROW) # Code to go to next row
time.sleep(0.01)
kbd.release_all()
for _ in range(3):
kbd.press(Keycode.LEFT_ARROW)
time.sleep(0.015)
kbd.release_all()
time.sleep(0.025) # Wait a bit more for Google Sheets
led.value = False
# Change 0.1 to whatever time you need between readings
time.sleep(0.1) | [
"[email protected]"
]
| |
64d2855cd04459ab7a7b86a9e703c6518a7c19f3 | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/not/sample_good666.py | 3687c9337e2f798525c72cf0779d606b08e582b2 | []
| no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | import random
import re
import array
import textwrap
import readline
nterms = 195
n1, n2 = 0, 1
if nterms <= 0:
print("Please provide a positive integer.")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
count = 0
    while count < nterms:
        print(n1)
        nth = n1 + n2
        n1 = n2
        n2 = nth
        count += 1
| [
"[email protected]"
]
| |
6730aafef63549f62e2673d9ec48a2b98ce7cfcc | d044e88e622d9f4ca350aa4fd9d95d7ba2fae50b | /application/dataentry/migrations/0192_auto_20210722_1359.py | 7e1c663d3fec4392b13dc51e6c16f22fc0f16cee | []
| no_license | Tiny-Hands/tinyhands | 337d5845ab99861ae189de2b97b8b36203c33eef | 77aa0bdcbd6f2cbedc7eaa1fa4779bb559d88584 | refs/heads/develop | 2023-09-06T04:23:06.330489 | 2023-08-31T11:31:17 | 2023-08-31T11:31:17 | 24,202,150 | 7 | 3 | null | 2023-08-31T11:31:18 | 2014-09-18T19:35:02 | PLpgSQL | UTF-8 | Python | false | false | 497 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-07-22 13:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dataentry', '0191_auto_20210712_1433'),
]
operations = [
migrations.AlterField(
model_name='stationstatistics',
name='budget',
field=models.DecimalField(decimal_places=2, max_digits=17, null=True),
),
]
| [
"[email protected]"
]
| |
9efe0099db495a6abf8ec4e5391c09aec9b087d3 | 525bdfe2c7d33c901598a501c145df94a3e162b0 | /math_projects/kateryna/bin/constants.py | c2f278310902832628add1fa859476272f1c01ff | []
| no_license | davendiy/ads_course2 | f0a52108f1cab8619b2e6e2c6c4383a1a4615c15 | e44bf2b535b34bc31fb323c20901a77b0b3072f2 | refs/heads/master | 2020-04-06T09:37:12.983564 | 2019-05-09T10:28:22 | 2019-05-09T10:28:22 | 157,349,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
import logging
DEFAULT_N = 1000 # number of elements returned by a search by default
# element types (the values are the table names in the DB)
KEY_WORD = 'Key_words'
SITE = 'Sites'
LINK = 'Links'
CATEGORIES = 'Categories' # name of the categories table
DEFAULT_DATABASE = 'data.db' # default path to the database
DEFAULT_LOG_GUI = 'parser_gui.log' # log file for the graphical interface
DEFAULT_LOG_CLIENT = 'parser_client.log' # log file for the client
FORMAT = '%(asctime) -15s %(message)s' # record format: <time> <message>
SLEEP = 1 # duration of the monitoring interval (in hours)
# lists of the displayed fields for each table
LINKS_GUI_FIELDS = ['Link', 'Category', 'Date', 'Information']
SITES_GUI_FIELDS = ['Id', 'Name', 'Link']
KEY_WORDS_GUI_FIELDS = ['Id', 'Word']
# lists of all fields for each table
SITES_DATA_FIELDS = ['Id', 'Name', 'Link', 'Category_id']
KEY_WORDS_DATA_FIELDS = ['Id', 'Word', "Category_id"]
CATEGORIES_FIELDS = ['Id', 'Name']
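# A minimal usage sketch (assumed, not part of the original module): wire the
# constants above into the standard logging module.
def configure_default_logging(logfile=DEFAULT_LOG_CLIENT):
    logging.basicConfig(filename=logfile, format=FORMAT, level=logging.INFO)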
| [
"[email protected]"
]
| |
34109b133c9e51f5fe159c8a970393a67ac6d7d8 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/python/ops/gradients.py | 9fa8e27d5cb51e0c2dd0b7926756a579d38841d2 | [
"Apache-2.0"
]
| permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 1,240 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.gradients_impl import AggregationMethod
from tensorflow.python.ops.gradients_impl import gradients
from tensorflow.python.ops.gradients_impl import hessians
# pylint: enable=unused-import
| [
"[email protected]"
]
| |
68d509c7c66a8393f202ba51444e4af380bc3c9b | 9ca9cad46f2358717394f39e2cfac2af4a2f5aca | /Week16/MainHW/MainHW Week16_KSY.py | 86af737749bde01e82c6dcf8a85382d1d4c33cd5 | []
| no_license | Artinto/Python_and_AI_Study | ddfd165d1598914e99a125c3019a740a7791f6f6 | 953ff3780287825afe9ed5f9b45017359707d07a | refs/heads/main | 2023-05-05T15:42:25.963855 | 2021-05-24T12:24:31 | 2021-05-24T12:24:31 | 325,218,591 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | '''
This script shows how to predict stock prices using a basic RNN
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import os
import matplotlib
torch.manual_seed(777) # reproducibility
import matplotlib.pyplot as plt
def MinMaxScaler(data):
''' Min Max Normalization
Parameters
----------
data : numpy.ndarray
input data to be normalized
shape: [Batch size, dimension]
Returns
----------
    data : numpy.ndarray
normalized data
shape: [Batch size, dimension]
References
----------
.. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
'''
numerator = data - np.min(data, 0)
denominator = np.max(data, 0) - np.min(data, 0)
# noise term prevents the zero division
return numerator / (denominator + 1e-7)
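# A small illustrative check (hypothetical helper, not used by the training
# script below): MinMaxScaler maps every column independently onto [0, 1].
def _minmax_scaler_demo():
    demo = np.array([[1., 10.], [2., 20.], [3., 30.]])
    # each column becomes approximately [0.0, 0.5, 1.0]
    return MinMaxScaler(demo)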
# train Parameters
learning_rate = 0.01
num_epochs = 500
input_size = 5
hidden_size = 5
num_classes = 1
timesteps = seq_length = 14
num_layers = 1 # number of layers in RNN
# Open, High, Low, Volume, Close
xy = np.loadtxt('stock.csv', delimiter=',')
xy = xy[::-1] # reverse order (chronically ordered)
xy = MinMaxScaler(xy)
x = xy
y = xy[:, [-1]] # Close as label
# build a dataset
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
_x = x[i:i + seq_length]
_y = y[i + seq_length] # Next close price
dataX.append(_x)
dataY.append(_y)
# train/test split
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX = torch.Tensor(np.array(dataX[0:train_size]))
trainX = Variable(trainX)
testX = torch.Tensor(np.array(dataX[train_size:len(dataX)]))
testX = Variable(testX)
trainY = torch.Tensor(np.array(dataY[0:train_size]))
trainY = Variable(trainY)
testY = torch.Tensor(np.array(dataY[train_size:len(dataY)]))
testY = Variable(testY)
class LSTM(nn.Module):
def __init__(self, num_classes, input_size, hidden_size, num_layers):
super(LSTM, self).__init__()
self.num_classes = num_classes
self.num_layers = num_layers
self.input_size = input_size
self.hidden_size = hidden_size
self.seq_length = seq_length
# Set parameters for RNN block
# Note: batch_first=False by default.
# When true, inputs are (batch_size, sequence_length, input_dimension)
# instead of (sequence_length, batch_size, input_dimension)
self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, batch_first=True)
# Fully connected layer
self.fc = nn.Linear(hidden_size, num_classes)
def forward(self, x):
# Initialize hidden and cell states
h_0 = Variable(torch.zeros(
self.num_layers, x.size(0), self.hidden_size))
c_0 = Variable(torch.zeros(
self.num_layers, x.size(0), self.hidden_size))
# Propagate input through LSTM
_, (h_out, _) = self.lstm(x, (h_0, c_0))
h_out = h_out.view(-1, self.hidden_size)
out = self.fc(h_out)
return out
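# A minimal shape sketch (hypothetical helper, not called during training):
# with batch_first=True the model expects inputs of shape
# (batch, seq_length, input_size) and returns one prediction per window.
def _lstm_shape_demo():
    demo_model = LSTM(num_classes, input_size, hidden_size, num_layers)
    dummy_batch = Variable(torch.zeros(4, seq_length, input_size))
    return demo_model(dummy_batch).shape  # torch.Size([4, 1])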
# Instantiate RNN model
lstm = LSTM(num_classes, input_size, hidden_size, num_layers)
# Set loss and optimizer function
criterion = torch.nn.MSELoss() # mean-squared error for regression
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
outputs = lstm(trainX)
optimizer.zero_grad()
# obtain the loss function
loss = criterion(outputs, trainY)
loss.backward()
optimizer.step()
print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))
print("Learning finished!")
# Test the model
lstm.eval()
test_predict = lstm(testX)
# Plot predictions
test_predict = test_predict.data.numpy()
testY = testY.data.numpy()
plt.plot(testY)
plt.plot(test_predict)
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
plt.show()
| [
"[email protected]"
]
| |
0861a3ba0e77e14cd38e259cec9bfe9413d33873 | e7d5555eb0b80ad59e7c76dd31e5fa9a23ec4a4c | /muddery/worlddata/dao/dialogue_sentences_mapper.py | 6e08b22dd51bc6ba16bc055d6d3aed08c566c4e2 | [
"BSD-3-Clause"
]
| permissive | noahzaozao/muddery | 4d1ef24b4a7f0ef178a1c28c367a441cbb57ee5c | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | refs/heads/master | 2023-01-25T02:23:50.123889 | 2018-06-10T17:12:22 | 2018-06-10T17:12:22 | 137,031,119 | 0 | 0 | NOASSERTION | 2019-10-28T15:04:26 | 2018-06-12T07:05:42 | Python | UTF-8 | Python | false | false | 691 | py | """
Query and deal common tables.
"""
from __future__ import print_function
from evennia.utils import logger
from django.apps import apps
from django.conf import settings
class DialogueSentencesMapper(object):
"""
NPC's dialogue sentences.
"""
def __init__(self):
self.model_name = "dialogue_sentences"
self.model = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
self.objects = self.model.objects
def filter(self, key):
"""
Get dialogue sentences.
Args:
key: (string) dialogue's key.
"""
return self.objects.filter(dialogue=key)
DIALOGUE_SENTENCES = DialogueSentencesMapper()
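# A minimal usage sketch (hypothetical dialogue key, not part of the original
# module): fetch all sentence records attached to one dialogue.
def get_sentences_for_dialogue(dialogue_key):
    return DIALOGUE_SENTENCES.filter(dialogue_key)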
| [
"[email protected]"
]
| |
ea86b165173183397c78e5aa1e6322ec98a122de | 740cd3a198c8ebb815da04c0e7a549696ab6a84c | /virtual/bin/wheel | 4ef50c7f2679686ea22a122921d06bdaa0c52a4f | []
| no_license | kahenya-anita/Simple-Ecommerce | 3a3734e8b9f8c5ce489404042c456449adbca724 | ff9d7d06869c52d595304ab238f797a1c65947d0 | refs/heads/master | 2023-03-03T13:40:43.930809 | 2021-02-09T15:24:56 | 2021-02-09T15:24:56 | 337,444,815 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/toshiba/Documents/Ecommerce_Django-master/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
6b34dfae513fa55c66c92dd64ea87fa9d1207242 | 45a924e5cd1dfc75a2088d3d4463995803a06a09 | /frappe/email/doctype/email_unsubscribe/test_email_unsubscribe.py | 602840fe3b30b30238661516ade48243176ea9b0 | [
"MIT"
]
| permissive | joe-santy/frappe | 7cad66295f07f60176fbbc24766af5e38ac1e9d2 | a6d9170e5fd9fdff462eee7967409ff7e23b6d2f | refs/heads/develop | 2023-07-15T15:59:03.226729 | 2021-08-09T16:20:11 | 2021-08-09T16:20:11 | 394,489,040 | 0 | 0 | MIT | 2021-08-13T13:12:31 | 2021-08-10T01:22:17 | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import frappe
import unittest
# test_records = frappe.get_test_records('Email Unsubscribe')
class TestEmailUnsubscribe(unittest.TestCase):
pass
| [
"[email protected]"
]
| |
5271e0aa4d4bd54e4dc811366c02c1b6de9d5155 | e7069d85fd4a6fac4958f19b4d14503ffa42b4bb | /connecting_silos_kththesis_TCOMK_CINTE/mysite/polls/library/Canvas-master/compute_list_of_KTH_play_URLs_on_pages_in_course3.py | 3320841678f5de02e64319ba7c296b48e4a75a03 | []
| no_license | ShivaBP/Bechelor-degree-project | cd062ff10e207e380a2c59bc0a50f073c2e866bd | 9f055d69ec9deabb6bd8ab3768c9d56787eed94d | refs/heads/master | 2022-07-21T01:18:41.893027 | 2018-11-16T14:38:13 | 2018-11-16T14:38:13 | 137,949,087 | 0 | 0 | null | 2022-07-06T19:49:14 | 2018-06-19T21:47:51 | HTML | UTF-8 | Python | false | false | 16,355 | py | #!/usr/bin/python3
#
# ./compute_list_of_KTH_play_URLs_on_pages_in_course3.py course_id
#
# walks all of the course pages, the syllabus, and assignments
#
# it outputs a CSV file with the name URLs_for_course_xx.csv
# where xx is the course_id
#
# G. Q. Maguire Jr.
#
# 2017.04.21
# based on earlier program: compute_stats_for_pages_in_course.py
#
import csv, requests, time
from pprint import pprint
import optparse
import sys
from lxml import html
import json
#############################
###### EDIT THIS STUFF ######
#############################
# styled based upon https://martin-thoma.com/configuration-files-in-python/
with open('config.json') as json_data_file:
configuration = json.load(json_data_file)
canvas = configuration['canvas']
access_token= canvas["access_token"]
# access_token=configuration["canvas"]["access_token"]
#baseUrl = 'https://kth.instructure.com/api/v1/courses/' # changed to KTH domain
baseUrl = 'https://%s/api/v1/courses/' % canvas.get('host', 'kth.instructure.com')
header = {'Authorization' : 'Bearer ' + access_token}
#modules_csv = 'modules.csv' # name of file storing module names
log_file = 'log.txt' # a log file. it will log things
def write_to_log(message):
with open(log_file, 'a') as log:
log.write(message + "\n")
pprint(message)
def unique_URLs(txt):
set_of_unique_URLs=set()
text_words=txt.split()
for t in text_words:
if (t.find("http://") >= 0 or
t.find("HTTP://") >= 0 or
t.find("https://") >= 0 or
t.find("HTTPs://") >= 0):
set_of_unique_URLs.add(t)
return set_of_unique_URLs
def unique_KTH_Play_URLs(set_of_urls):
set_of_unique_URLs=set()
for t in set_of_urls:
if t.find("//play.kth.se") >= 0:
set_of_unique_URLs.add(t)
return set_of_unique_URLs
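# A minimal illustrative example (hypothetical URLs, not called by the script):
# only the tokens that point at play.kth.se survive the two filters above.
def demo_unique_KTH_Play_URLs():
    sample_text = "see https://play.kth.se/media/clip1 and https://www.kth.se/page"
    return unique_KTH_Play_URLs(unique_URLs(sample_text))  # {'https://play.kth.se/media/clip1'}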
def compute_stats_for_pages_in_course(course_id):
list_of_all_pages=[]
page_stats=[]
# Use the Canvas API to get the list of pages for this course
#GET /api/v1/courses/:course_id/pages
url = baseUrl + '%s/pages' % (course_id)
if Verbose_Flag:
print("url: " + url)
r = requests.get(url, headers = header)
if Verbose_Flag:
write_to_log("result of getting pages: " + r.text)
if r.status_code == requests.codes.ok:
page_response=r.json()
else:
print("No pages for course_id: {}".format(course_id))
return False
for p_response in page_response:
list_of_all_pages.append(p_response)
# the following is needed when the reponse has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of modules
# see "Handling Pagination" - Discussion created by [email protected] on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
for p in list_of_all_pages:
# make a new list of links for each page
raw_links = set()
print("{}".format(p["title"]))
# Use the Canvas API to GET the page
#GET /api/v1/courses/:course_id/pages/:url
url = baseUrl + '%s/pages/%s' % (course_id, p["url"])
if Verbose_Flag:
print(url)
payload={}
r = requests.get(url, headers = header, data=payload)
if r.status_code == requests.codes.ok:
page_response = r.json()
if Verbose_Flag:
print("body: {}".format(page_response["body"]))
try:
document = html.document_fromstring(page_response["body"])
#raw_text = document.text_content()
for link in document.xpath('//a/@href'):
if Verbose_Flag:
print("link: {}".format(link))
raw_links.add(link)
except ValueError:
# if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
if Verbose_Flag:
print("there is likely code on page {}".format(url))
continue
if Verbose_Flag:
print("raw_links: {}".format(raw_links))
else:
print("No pages for course_id: {}".format(course_id))
return False
# see http://www.erinhengel.com/software/textatistic/
try:
fixed_title=page_response["title"].replace(',', '_comma_')
fixed_title=fixed_title.replace('"', '_doublequote_')
fixed_title=fixed_title.replace("'", '_singlequote_')
page_entry={"url": url, "page_name": fixed_title, "unique URLs": unique_KTH_Play_URLs(raw_links)}
except ZeroDivisionError:
# if there are zero sentences, then some of the scores cannot be computed
if Verbose_Flag:
print("no sentences in page {}".format(url))
continue
except ValueError:
# if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
if Verbose_Flag:
print("there is likely code on page {}".format(url))
continue
if page_entry:
page_stats.append(page_entry)
return page_stats
def get_course_syllabus(course_id):
page_stats=[]
# make a new list of links
raw_links = set()
# Use the Canvas API to get the list of pages for this course
#GET /api/v1/courses/:course_id?include[]=syllabus_body
url = baseUrl + '%s' % (course_id)
if Verbose_Flag:
print("url: " + url)
extra_parameters={'include[]': 'syllabus_body'}
r = requests.get(url, params=extra_parameters, headers = header)
if Verbose_Flag:
write_to_log("result of getting syllabus: " + r.text)
if r.status_code == requests.codes.ok:
page_response=r.json()
if Verbose_Flag:
print("body: {}".format(page_response["syllabus_body"]))
if len(page_response["syllabus_body"]) == 0:
return []
try:
document = html.document_fromstring(page_response["syllabus_body"])
#raw_text = document.text_content()
for link in document.xpath('//a/@href'):
if Verbose_Flag:
print("link: {}".format(link))
raw_links.add(link)
except ValueError:
# if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
if Verbose_Flag:
print("there is likely code on page {}".format(url))
else:
print("No syllabus for course_id: {}".format(course_id))
return False
# see http://www.erinhengel.com/software/textatistic/
try:
fixed_title='Syllabus'
page_entry={"url": url, "page_name": fixed_title, "unique URLs": unique_KTH_Play_URLs(raw_links)}
except ZeroDivisionError:
# if there are zero sentences, then some of the scores cannot be computed
if Verbose_Flag:
print("no sentences in page {}".format(url))
except ValueError:
# if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
if Verbose_Flag:
print("there is likely code on page {}".format(url))
if page_entry:
page_stats.append(page_entry)
return page_stats
def list_pages(course_id):
list_of_all_pages=[]
# Use the Canvas API to get the list of pages for this course
#GET /api/v1/courses/:course_id/pages
url = baseUrl + '%s/pages' % (course_id)
if Verbose_Flag:
print("url: " + url)
r = requests.get(url, headers = header)
if Verbose_Flag:
write_to_log("result of getting pages: " + r.text)
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only part of the list of pages
# see "Handling Pagination" - Discussion created by [email protected] on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
list_of_all_pages.append(p_response)
for p in list_of_all_pages:
print("{}".format(p["title"]))
def get_assignments(course_id):
assignments_found_thus_far=[]
page_stats=[]
# make a new list of links
raw_links = set()
# Use the Canvas API to get the list of assignments for the course
#GET /api/v1/courses/:course_id/assignments
url = baseUrl + '%s/assignments' % (course_id)
if Verbose_Flag:
print("url: " + url)
r = requests.get(url, headers = header)
if Verbose_Flag:
write_to_log("result of getting assignments: " + r.text)
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
assignments_found_thus_far.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only part of the list of assignments
# see "Handling Pagination" - Discussion created by [email protected] on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
assignments_found_thus_far.append(p_response)
for a in assignments_found_thus_far:
# make a new list of links for each assignment
raw_links = set()
print("{}".format(a["name"]))
url = a["html_url"]
if Verbose_Flag:
print(url)
if Verbose_Flag:
print("description: {}".format(a["description"]))
try:
document = html.document_fromstring(a["description"])
#raw_text = document.text_content()
for link in document.xpath('//a/@href'):
if Verbose_Flag:
print("link: {}".format(link))
raw_links.add(link)
except ValueError:
# if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
if Verbose_Flag:
print("there is likely code on page {}".format(url))
continue
# see http://www.erinhengel.com/software/textatistic/
try:
fixed_title=a["name"].replace(',', '_comma_')
fixed_title=fixed_title.replace('"', '_doublequote_')
fixed_title=fixed_title.replace("'", '_singlequote_')
page_entry={"url": url, "page_name": fixed_title, "unique URLs": unique_KTH_Play_URLs(raw_links)}
except ZeroDivisionError:
# if there are zero sentences, then some of the scores cannot be computed
if Verbose_Flag:
print("no sentences in assignment {}".format(url))
continue
except ValueError:
# if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
if Verbose_Flag:
print("there is likely code on page {}".format(url))
continue
if page_entry:
page_stats.append(page_entry)
return page_stats
def main():
global Verbose_Flag
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
if Verbose_Flag:
print('ARGV :', sys.argv[1:])
print('VERBOSE :', options.verbose)
print('REMAINING :', remainder)
# add time stamp to log file
log_time = str(time.asctime(time.localtime(time.time())))
if Verbose_Flag:
write_to_log(log_time)
if (len(remainder) < 1):
print("Inusffient arguments\n must provide course_id\n")
else:
course_id=remainder[0]
output=compute_stats_for_pages_in_course(course_id)
if Verbose_Flag:
print("output: {}".format(output))
output2=get_course_syllabus(course_id)
if Verbose_Flag:
print("output2: {}".format(output2))
for i in output2:
output.append(i)
if Verbose_Flag:
print("output following syllabus processing: {}".format(output))
output3=get_assignments(course_id)
if Verbose_Flag:
print("output3: {}".format(output3))
for i in output3:
output.append(i)
if Verbose_Flag:
print("output following assignment processing: {}".format(output))
if (output):
if Verbose_Flag:
print(output)
with open('KTHplay_URLs_for_course_'+course_id+'.csv', "wb") as writer:
spreadsheet_headings = ['url', 'page_name', 'unique URLs']
for heading in spreadsheet_headings:
encoded_output = bytes((heading + ","), 'UTF-8')
writer.write(encoded_output)
writer.write(bytes(u'\n', 'UTF-8'))
for item in output:
out_row = [item['url'], item['page_name'], item['unique URLs']]
for v in out_row:
if type(v) is str:
encoded_output = bytes((v + ","), 'UTF-8')
else:
encoded_output = bytes((str(v) + ","), 'UTF-8')
writer.write(encoded_output)
writer.write(bytes(u'\n', 'UTF-8'))
writer.close()
# add time stamp to log file
log_time = str(time.asctime(time.localtime(time.time())))
if Verbose_Flag:
write_to_log(log_time)
write_to_log("\n--DONE--\n\n")
if __name__ == "__main__": main()
| [
"[email protected]"
]
| |
74b0a2c23703cb4e5ab03f2b4f26df4d4bbbd55f | c705b2620119df0d60e925e55228bfbb5de3f568 | /archives/twitter/add_to_list.py | b07b820c711aef611ff33b5d19f9e517e8424b05 | [
"Apache-2.0"
]
| permissive | mcxiaoke/python-labs | 5aa63ce90de5da56d59ca2954f6b3aeae7833559 | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | refs/heads/master | 2021-08-05T03:47:51.844979 | 2021-07-24T11:06:13 | 2021-07-24T11:06:13 | 21,690,171 | 7 | 7 | Apache-2.0 | 2020-08-07T01:52:32 | 2014-07-10T10:20:17 | Python | UTF-8 | Python | false | false | 1,023 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2016-01-04 14:39:15
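# usage (assumed from main() below): python add_to_list.py <file with one screen name per line>
# each screen name in the file is added to the owner's 'asiangirls' Twitter list via tweepy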
from __future__ import print_function, unicode_literals
import os
import sys
import codecs
import requests
import tweepy
from config import OWNER, OWNER_ID, CONSUMER_KEY, CONSUMER_SECRET, ACCESSS_TOKEN_KEY, ACCESS_TOKEN_SECRET
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESSS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def read_list(name):
if not os.path.isfile(name):
return None
with codecs.open(name, 'r', 'utf-8') as f:
return [line.rstrip('\n') for line in f]
def add_to_list(slug, screen_name):
print('add user: %s to list: %s' % (screen_name, slug))
api.add_list_member(slug=slug,
screen_name=screen_name,
owner_screen_name='dorauimi')
def main():
uids = read_list(sys.argv[1])
for uid in uids:
add_to_list('asiangirls', uid)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
1cf91c973076f5cd1c46e4f58e68999f1a89e80d | a2f67003e0eededb0c2d7645d83243d19af71340 | /exam_subject/Subject/apps/topic/migrations/0010_answer_analysis.py | 117680a8d906da18fcca8540dbfdcda5856ebd05 | []
| no_license | john123951/SubjectSystem | c6bf118627aa54ba56bd367f73528e66f51dcd58 | a7f8e6014f81ec4376f3c5f437a280e801ab22e4 | refs/heads/master | 2020-07-13T16:36:15.663952 | 2019-06-19T07:02:14 | 2019-06-19T07:02:14 | 205,115,935 | 7 | 0 | null | 2019-08-29T08:23:00 | 2019-08-29T08:22:59 | null | UTF-8 | Python | false | false | 422 | py | # Generated by Django 2.0.2 on 2019-05-05 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topic', '0009_examtime_exam_number'),
]
operations = [
migrations.AddField(
model_name='answer',
name='analysis',
field=models.CharField(default='', max_length=500, verbose_name='解析'),
),
]
| [
"[email protected]"
]
| |
c5708367337a0c64f2df12dcce951050022001b6 | 2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6 | /baekjoon/accepted/15480 LCA와 쿼리.py | 8220d3407e8cfb7390cba36119d50b67d795abeb | []
| no_license | grasshopperTrainer/coding_practice | 530e9912b10952c866d35d69f12c99b96959a22d | d1e5e6d6fa3f71f1a0105940fff1785068aec8b0 | refs/heads/master | 2023-06-01T13:30:15.362657 | 2021-06-08T08:40:15 | 2021-06-08T08:40:15 | 267,359,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | # not accepted
from sys import stdin
from collections import deque
def solution(N, edges, asked):
nd_tree = {}
for a, b in edges:
nd_tree.setdefault(a, []).append(b)
nd_tree.setdefault(b, []).append(a)
answers = []
for root, a, b in asked:
# find directed tree and depth
tree = [i for i in range(N+1)]
node_depth = [0 for _ in range(N+1)]
max_depth = 0
que = deque([[root,0]])
visited = {root}
while que:
at, depth = que.popleft()
max_depth = max((max_depth, depth))
for goto in nd_tree[at]:
if goto not in visited:
visited.add(goto)
tree[goto] = at
node_depth[goto] = depth+1
que.append((goto, depth+1))
# build ancestor table
ancestry_d = len(bin(max_depth)[2:])+1
lca = [[root for _ in range(ancestry_d)] for _ in range(N+1)]
for node in range(1, N+1):
for anc in range(ancestry_d):
if anc == 0:
lca[node][anc] = tree[node]
else:
lca[node][anc] = lca[lca[node][anc-1]][anc-1]
# search asked
while node_depth[a] != node_depth[b]:
if node_depth[a] > node_depth[b]:
a = tree[a]
else:
b = tree[b]
while a != b:
anc = 0
while lca[a][anc+1] != lca[b][anc+1]:
anc += 1
a, b = lca[a][anc], lca[b][anc]
answers.append(a)
return answers
N = int(stdin.readline())
edges = []
for _ in range(N-1):
edges.append([int(c) for c in stdin.readline().strip().split(' ')])
M = int(stdin.readline())
asked = []
for _ in range(M):
asked.append([int(c) for c in stdin.readline().strip().split(' ')])
for a in solution(N, edges, asked):
print(a)
| [
"[email protected]"
]
| |
6a3307afb49fd8cb535570b49aaeafc631f3394b | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/v20170301/get_endpoint.py | 7e85c2b2da6288d9cba3d474eec4981e754e7d0c | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 8,850 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetEndpointResult',
'AwaitableGetEndpointResult',
'get_endpoint',
]
@pulumi.output_type
class GetEndpointResult:
"""
Class representing a Traffic Manager endpoint.
"""
def __init__(__self__, endpoint_location=None, endpoint_monitor_status=None, endpoint_status=None, geo_mapping=None, min_child_endpoints=None, name=None, priority=None, target=None, target_resource_id=None, type=None, weight=None):
if endpoint_location and not isinstance(endpoint_location, str):
raise TypeError("Expected argument 'endpoint_location' to be a str")
pulumi.set(__self__, "endpoint_location", endpoint_location)
if endpoint_monitor_status and not isinstance(endpoint_monitor_status, str):
raise TypeError("Expected argument 'endpoint_monitor_status' to be a str")
pulumi.set(__self__, "endpoint_monitor_status", endpoint_monitor_status)
if endpoint_status and not isinstance(endpoint_status, str):
raise TypeError("Expected argument 'endpoint_status' to be a str")
pulumi.set(__self__, "endpoint_status", endpoint_status)
if geo_mapping and not isinstance(geo_mapping, list):
raise TypeError("Expected argument 'geo_mapping' to be a list")
pulumi.set(__self__, "geo_mapping", geo_mapping)
if min_child_endpoints and not isinstance(min_child_endpoints, int):
raise TypeError("Expected argument 'min_child_endpoints' to be a int")
pulumi.set(__self__, "min_child_endpoints", min_child_endpoints)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if priority and not isinstance(priority, int):
raise TypeError("Expected argument 'priority' to be a int")
pulumi.set(__self__, "priority", priority)
if target and not isinstance(target, str):
raise TypeError("Expected argument 'target' to be a str")
pulumi.set(__self__, "target", target)
if target_resource_id and not isinstance(target_resource_id, str):
raise TypeError("Expected argument 'target_resource_id' to be a str")
pulumi.set(__self__, "target_resource_id", target_resource_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if weight and not isinstance(weight, int):
raise TypeError("Expected argument 'weight' to be a int")
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="endpointLocation")
def endpoint_location(self) -> Optional[str]:
"""
Specifies the location of the external or nested endpoints when using the ‘Performance’ traffic routing method.
"""
return pulumi.get(self, "endpoint_location")
@property
@pulumi.getter(name="endpointMonitorStatus")
def endpoint_monitor_status(self) -> Optional[str]:
"""
Gets or sets the monitoring status of the endpoint.
"""
return pulumi.get(self, "endpoint_monitor_status")
@property
@pulumi.getter(name="endpointStatus")
def endpoint_status(self) -> Optional[str]:
"""
Gets or sets the status of the endpoint.. If the endpoint is Enabled, it is probed for endpoint health and is included in the traffic routing method. Possible values are 'Enabled' and 'Disabled'.
"""
return pulumi.get(self, "endpoint_status")
@property
@pulumi.getter(name="geoMapping")
def geo_mapping(self) -> Optional[Sequence[str]]:
"""
Gets or sets the list of countries/regions mapped to this endpoint when using the ‘Geographic’ traffic routing method. Please consult Traffic Manager Geographic documentation for a full list of accepted values.
"""
return pulumi.get(self, "geo_mapping")
@property
@pulumi.getter(name="minChildEndpoints")
def min_child_endpoints(self) -> Optional[int]:
"""
Gets or sets the minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available. Only applicable to endpoint of type 'NestedEndpoints'.
"""
return pulumi.get(self, "min_child_endpoints")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets or sets the name of the Traffic Manager endpoint.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
"""
Gets or sets the priority of this endpoint when using the ‘Priority’ traffic routing method. Possible values are from 1 to 1000, lower values represent higher priority. This is an optional parameter. If specified, it must be specified on all endpoints, and no two endpoints can share the same priority value.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def target(self) -> Optional[str]:
"""
Gets or sets the fully-qualified DNS name of the endpoint. Traffic Manager returns this value in DNS responses to direct traffic to this endpoint.
"""
return pulumi.get(self, "target")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> Optional[str]:
"""
Gets or sets the Azure Resource URI of the of the endpoint. Not applicable to endpoints of type 'ExternalEndpoints'.
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Gets or sets the endpoint type of the Traffic Manager endpoint.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def weight(self) -> Optional[int]:
"""
Gets or sets the weight of this endpoint when using the 'Weighted' traffic routing method. Possible values are from 1 to 1000.
"""
return pulumi.get(self, "weight")
class AwaitableGetEndpointResult(GetEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEndpointResult(
endpoint_location=self.endpoint_location,
endpoint_monitor_status=self.endpoint_monitor_status,
endpoint_status=self.endpoint_status,
geo_mapping=self.geo_mapping,
min_child_endpoints=self.min_child_endpoints,
name=self.name,
priority=self.priority,
target=self.target,
target_resource_id=self.target_resource_id,
type=self.type,
weight=self.weight)
def get_endpoint(endpoint_name: Optional[str] = None,
endpoint_type: Optional[str] = None,
profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointResult:
"""
Use this data source to access information about an existing resource.
:param str endpoint_name: The name of the Traffic Manager endpoint.
:param str endpoint_type: The type of the Traffic Manager endpoint.
:param str profile_name: The name of the Traffic Manager profile.
:param str resource_group_name: The name of the resource group containing the Traffic Manager endpoint.
"""
__args__ = dict()
__args__['endpointName'] = endpoint_name
__args__['endpointType'] = endpoint_type
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20170301:getEndpoint', __args__, opts=opts, typ=GetEndpointResult).value
return AwaitableGetEndpointResult(
endpoint_location=__ret__.endpoint_location,
endpoint_monitor_status=__ret__.endpoint_monitor_status,
endpoint_status=__ret__.endpoint_status,
geo_mapping=__ret__.geo_mapping,
min_child_endpoints=__ret__.min_child_endpoints,
name=__ret__.name,
priority=__ret__.priority,
target=__ret__.target,
target_resource_id=__ret__.target_resource_id,
type=__ret__.type,
weight=__ret__.weight)
| [
"[email protected]"
]
| |
e048837fa12f55157f9452e0736edb9ff1bd7cf7 | eeb7e70b0b68decbdcb32682351e54e0be99a5b0 | /kaggle/python_files/sample904.py | 82ab437aca033f7a777414cf29125f255fb92898 | []
| no_license | SocioProphet/CodeGraph | 8bafd7f03204f20da8f54ab23b04f3844e6d24de | 215ac4d16d21d07e87964fe9a97a5bf36f4c7d64 | refs/heads/master | 2023-02-16T02:51:27.791886 | 2021-01-15T07:00:41 | 2021-01-15T07:00:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,371 | py | #!/usr/bin/env python
# coding: utf-8
# **Notebook Objective:**
#
# The objective of this notebook is to look at the different pretrained embeddings provided in the dataset and to see how useful they are in the model building process.
#
# First let us import the necessary modules and read the input data.
# In[ ]:
import os
import time
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import math
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
# In[ ]:
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
print("Train shape : ",train_df.shape)
print("Test shape : ",test_df.shape)
# Next steps are as follows:
# * Split the training dataset into train and val samples. Cross validation is a time-consuming process, so let us do a simple train/val split.
# * Fill up the missing values in the text column with '_na_'
# * Tokenize the text column and convert them to vector sequences
# * Pad the sequences as needed - if the number of words in the text is greater than 'max_len', truncate them to 'max_len'; if it is less than 'max_len', add zeros for the remaining values (a small illustration of the padding behaviour follows below).
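# For intuition on the padding step: Keras pad_sequences pre-pads and pre-truncates by default, e.g.
# pad_sequences([[1, 2, 3]], maxlen=5) -> [[0, 0, 1, 2, 3]]
# pad_sequences([[1, 2, 3, 4, 5, 6]], maxlen=5) -> [[2, 3, 4, 5, 6]]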
# In[ ]:
## split to train and val
train_df, val_df = train_test_split(train_df, test_size=0.1, random_state=2018)
## some config values
embed_size = 300 # how big is each word vector
max_features = 50000 # how many unique words to use (i.e num rows in embedding vector)
maxlen = 100 # max number of words in a question to use
## fill up the missing values
train_X = train_df["question_text"].fillna("_na_").values
val_X = val_df["question_text"].fillna("_na_").values
test_X = test_df["question_text"].fillna("_na_").values
## Tokenize the sentences
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(train_X))
train_X = tokenizer.texts_to_sequences(train_X)
val_X = tokenizer.texts_to_sequences(val_X)
test_X = tokenizer.texts_to_sequences(test_X)
## Pad the sentences
train_X = pad_sequences(train_X, maxlen=maxlen)
val_X = pad_sequences(val_X, maxlen=maxlen)
test_X = pad_sequences(test_X, maxlen=maxlen)
## Get the target values
train_y = train_df['target'].values
val_y = val_df['target'].values
# **Without Pretrained Embeddings:**
#
# Now that we are done with all the necessary preprocessing steps, we can first train a Bidirectional GRU model. We will not use any pre-trained word embeddings for this model and the embeddings will be learnt from scratch. Please check out the model summary for the details of the layers used.
# In[ ]:
inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_size)(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Train the model using the train sample and monitor the metric on the validation sample. This is just a sample model running for 2 epochs. Changing the epochs, batch_size and model parameters might give us a better model.
# In[ ]:
## Train the model
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# Now let us get the validation sample predictions and also get the best threshold for F1 score.
# In[ ]:
pred_noemb_val_y = model.predict([val_X], batch_size=1024, verbose=1)
for thresh in np.arange(0.1, 0.501, 0.01):
thresh = np.round(thresh, 2)
print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_noemb_val_y>thresh).astype(int))))
# Now let us get the test set predictions as well and save them
# In[ ]:
pred_noemb_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# Now that our model building is done, it might be a good idea to clean up some memory before we go to the next step.
# In[ ]:
del model, inp, x
import gc; gc.collect()
time.sleep(10)
# So we got some baseline GRU model without pre-trained embeddings. Now let us use the provided embeddings and rebuild the model again to see the performance.
#
#
# In[ ]:
# We have four different types of embeddings.
# * GoogleNews-vectors-negative300 - https://code.google.com/archive/p/word2vec/
# * glove.840B.300d - https://nlp.stanford.edu/projects/glove/
# * paragram_300_sl999 - https://cogcomp.org/page/resource_view/106
# * wiki-news-300d-1M - https://fasttext.cc/docs/en/english-vectors.html
#
# A very good explanation of the different types of embeddings is given in this [kernel](https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge). Please refer to it for more details.
#
# **Glove Embeddings:**
#
# In this section, let us use the Glove embeddings and rebuild the GRU model.
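# The embedding file is plain text with one entry per line: a token followed by its 300 float
# components, separated by spaces. get_coefs() below parses each line into a word -> vector pair.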
# In[ ]:
EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# In[ ]:
pred_glove_val_y = model.predict([val_X], batch_size=1024, verbose=1)
for thresh in np.arange(0.1, 0.501, 0.01):
thresh = np.round(thresh, 2)
print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_glove_val_y>thresh).astype(int))))
# Results seem to be better than the model without pretrained embeddings.
# In[ ]:
pred_glove_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# In[ ]:
del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x
import gc; gc.collect()
time.sleep(10)
# **Wiki News FastText Embeddings:**
#
# Now let us use the FastText embeddings trained on Wiki News corpus in place of Glove embeddings and rebuild the model.
# In[ ]:
EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[ ]:
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# In[ ]:
pred_fasttext_val_y = model.predict([val_X], batch_size=1024, verbose=1)
for thresh in np.arange(0.1, 0.501, 0.01):
thresh = np.round(thresh, 2)
print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_fasttext_val_y>thresh).astype(int))))
# In[ ]:
pred_fasttext_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# In[ ]:
del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x
import gc; gc.collect()
time.sleep(10)
# **Paragram Embeddings:**
#
# In this section, we can use the paragram embeddings and build the model and make predictions.
# In[ ]:
EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[ ]:
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# In[ ]:
pred_paragram_val_y = model.predict([val_X], batch_size=1024, verbose=1)
for thresh in np.arange(0.1, 0.501, 0.01):
thresh = np.round(thresh, 2)
print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_paragram_val_y>thresh).astype(int))))
# In[ ]:
pred_paragram_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# In[ ]:
del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x
import gc; gc.collect()
time.sleep(10)
# **Observations:**
# * Overall, pretrained embeddings seem to give better results compared to the non-pretrained model.
# * The performance of the different pretrained embeddings is broadly similar.
#
# **Final Blend:**
#
# Though the results of the models with different pre-trained embeddings are similar, there is a good chance that they might capture different types of information from the data. So let us blend the three models by averaging their predictions (a small worked example of the weighting is given below).
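# As a small worked example of the weighting: if the three models give probabilities 0.2, 0.6 and 0.7
# for a question, the blend is 0.33*0.2 + 0.33*0.6 + 0.34*0.7 = 0.502, which would be labelled 1 at the
# 0.35 threshold used for the submission below.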
# In[ ]:
pred_val_y = 0.33*pred_glove_val_y + 0.33*pred_fasttext_val_y + 0.34*pred_paragram_val_y
for thresh in np.arange(0.1, 0.501, 0.01):
thresh = np.round(thresh, 2)
print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_val_y>thresh).astype(int))))
# The result seems to be better than the individual pre-trained models, so let us create a submission file using this model blend.
# In[ ]:
pred_test_y = 0.33*pred_glove_test_y + 0.33*pred_fasttext_test_y + 0.34*pred_paragram_test_y
pred_test_y = (pred_test_y>0.35).astype(int)
out_df = pd.DataFrame({"qid":test_df["qid"].values})
out_df['prediction'] = pred_test_y
out_df.to_csv("submission.csv", index=False)
#
# **References:**
#
# Thanks to the below kernels which helped me with this one.
# 1. https://www.kaggle.com/jhoward/improved-lstm-baseline-glove-dropout
# 2. https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge
| [
"[email protected]"
]
| |
ed66afc6b66bb066763ac8e3bfe8202fffbd4239 | 9c61ec2a55e897e4a3bb9145296081c648d812c4 | /docs/cd/06443007程式碼/ch10/10-7.py | 12e0599c6cd9b2461a1e86381e5859ddb809a4da | []
| no_license | wildboy2arthur/ML-Class | 47899246251d12972a6d3875160c1cc8d1052202 | 345c86e3f8890919d59a63a79674acbdcd4577c4 | refs/heads/main | 2023-07-16T11:32:07.683652 | 2021-08-24T08:25:04 | 2021-08-24T08:25:04 | 399,388,026 | 0 | 0 | null | 2021-08-24T08:18:36 | 2021-08-24T08:18:35 | null | UTF-8 | Python | false | false | 74 | py | sns.scatterplot(x='mean radius', y='mean texture', data=df, hue='target'); | [
"[email protected]"
]
| |
3ec63c24410051f8a1dc64905bbeb91ff0b787ea | 4839df5ce210b0d2d74a67677a2ec3d4faacf74d | /tally_ho/apps/tally/tests/views/reports/test_administrative_areas_reports.py | 4bc221d7f49dadf7c0978dcd26137ff7772fdb3b | [
"Apache-2.0"
]
| permissive | hashem92/tally-ho | 5bf7f8f30804362ccf862d5d9a920bb1ce4bb17b | f1667a5dbef808f37c8717ebfacf53499333370c | refs/heads/master | 2023-05-05T08:01:14.968280 | 2020-11-05T13:48:21 | 2020-11-05T13:48:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,634 | py | from django.test import RequestFactory
from tally_ho.libs.permissions import groups
from tally_ho.apps.tally.models.sub_constituency import SubConstituency
from tally_ho.apps.tally.views.reports import administrative_areas_reports
from tally_ho.libs.tests.test_base import create_result_form,\
create_station, create_reconciliation_form, create_tally,\
create_center, create_region, create_constituency, create_office, TestBase
class TestAdministrativeAreasReports(TestBase):
def setUp(self):
self.factory = RequestFactory()
self._create_permission_groups()
self._create_and_login_user()
self._add_user_to_group(self.user, groups.TALLY_MANAGER)
self.tally = create_tally()
self.tally.users.add(self.user)
region = create_region(tally=self.tally)
office = create_office(tally=self.tally, region=region)
constituency = create_constituency(tally=self.tally)
sc, _ = SubConstituency.objects.get_or_create(code=1, field_office='1')
center = create_center(tally=self.tally,
sub_constituency=sc,
constituency=constituency)
station = create_station(center=center, registrants=20)
result_form = create_result_form(
tally=self.tally,
office=office,
center=center,
station_number=station.station_number)
create_reconciliation_form(
result_form=result_form,
user=self.user,
number_ballots_inside_box=20,
number_cancelled_ballots=0,
number_spoiled_ballots=0,
number_unstamped_ballots=0,
number_unused_ballots=0,
number_valid_votes=20,
number_invalid_votes=0,
number_ballots_received=20,
)
def test_regions_reports(self):
"""
Test that the region reports are rendered as expected.
"""
request = self._get_request()
view = administrative_areas_reports.RegionsReportsView.as_view()
request = self.factory.get('/reports-regions')
request.user = self.user
response = view(
request,
tally_id=self.tally.pk,
group_name=groups.TALLY_MANAGER)
regions_turnout_report =\
administrative_areas_reports.generate_voters_turnout_report(
self.tally.id, 'result_form__office__region__name')[0]
self.assertContains(response, "<h1>Region Reports</h1>")
# Region turnout report tests
self.assertContains(response, "<h3>Turn Out Report</h3>")
self.assertContains(response, "<th>Region Name</th>")
self.assertContains(response, "<th>Total number of voters</th>")
self.assertContains(response, "<th>Number of voters voted</th>")
self.assertContains(response, "<th>Male voters</th>")
self.assertContains(response, "<th>Female voters</th>")
self.assertContains(response, "<th>Turnout percentage</th>")
self.assertContains(
response,
f'<td>{regions_turnout_report["name"]}</td>')
self.assertContains(
response,
f'<td>{regions_turnout_report["number_of_voters_voted"]}</td>')
self.assertContains(
response,
str('<td>'
f'{regions_turnout_report["total_number_of_registrants"]}'
'</td>'))
self.assertContains(
response,
str('<td>'
f'{regions_turnout_report["total_number_of_ballots_used"]}'
'</td>'))
self.assertContains(
response,
f'<td>{regions_turnout_report["male_voters"]}</td>')
self.assertContains(
response,
f'<td>{regions_turnout_report["female_voters"]}</td>')
self.assertContains(
response,
f'<td>{regions_turnout_report["turnout_percentage"]} %</td>')
votes_summary_report =\
administrative_areas_reports.generate_votes_summary_report(
self.tally.id, 'result_form__office__region__name')[0]
# Region votes summary report tests
self.assertContains(response, "<h3>Votes Summary Report</h3>")
self.assertContains(response, "<th>Region Name</th>")
self.assertContains(response, "<th>Total number of valid votes</th>")
self.assertContains(response, "<th>Total number of invalid votes</th>")
self.assertContains(
response, "<th>Total number of cancelled votes</th>")
self.assertContains(
response,
f'<td>{votes_summary_report["name"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_valid_votes"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_invalid_votes"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_cancelled_ballots"]}</td>')
def test_constituency_reports(self):
"""
Test that the constituency reports are rendered as expected.
"""
request = self._get_request()
view = administrative_areas_reports.ConstituencyReportsView.as_view()
request = self.factory.get('/reports-constituencies')
request.user = self.user
response = view(
request,
tally_id=self.tally.pk,
group_name=groups.TALLY_MANAGER)
turnout_report =\
administrative_areas_reports.generate_voters_turnout_report(
self.tally.id, 'result_form__center__constituency__name')[0]
self.assertContains(response, "<h1>Constituency Reports</h1>")
# Constituency turnout report tests
self.assertContains(response, "<h3>Turn Out Report</h3>")
self.assertContains(response, "<th>Constituency Name</th>")
self.assertContains(response, "<th>Total number of voters</th>")
self.assertContains(response, "<th>Number of voters voted</th>")
self.assertContains(response, "<th>Male voters</th>")
self.assertContains(response, "<th>Female voters</th>")
self.assertContains(response, "<th>Turnout percentage</th>")
self.assertContains(
response,
f'<td>{turnout_report["name"]}</td>')
self.assertContains(
response,
f'<td>{turnout_report["number_of_voters_voted"]}</td>')
self.assertContains(
response,
str('<td>'
f'{turnout_report["total_number_of_registrants"]}'
'</td>'))
self.assertContains(
response,
str('<td>'
f'{turnout_report["total_number_of_ballots_used"]}'
'</td>'))
self.assertContains(
response,
f'<td>{turnout_report["male_voters"]}</td>')
self.assertContains(
response,
f'<td>{turnout_report["female_voters"]}</td>')
self.assertContains(
response,
f'<td>{turnout_report["turnout_percentage"]} %</td>')
votes_summary_report =\
administrative_areas_reports.generate_votes_summary_report(
self.tally.id, 'result_form__center__constituency__name')[0]
# Constituency votes summary report tests
self.assertContains(response, "<h3>Votes Summary Report</h3>")
self.assertContains(response, "<th>Constituency Name</th>")
self.assertContains(response, "<th>Total number of valid votes</th>")
self.assertContains(response, "<th>Total number of invalid votes</th>")
self.assertContains(
response, "<th>Total number of cancelled votes</th>")
self.assertContains(
response,
f'<td>{votes_summary_report["name"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_valid_votes"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_invalid_votes"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_cancelled_ballots"]}</td>')
def test_sub_constituency_reports(self):
"""
Test that the sub constituency reports are rendered as expected.
"""
request = self._get_request()
view =\
administrative_areas_reports.SubConstituencyReportsView.as_view()
request = self.factory.get('/reports-sub-constituencies')
request.user = self.user
response = view(
request,
tally_id=self.tally.pk,
group_name=groups.TALLY_MANAGER)
turnout_report =\
administrative_areas_reports.generate_voters_turnout_report(
self.tally.id,
'result_form__center__sub_constituency__code')[0]
self.assertContains(response, "<h1>Sub Constituency Reports</h1>")
# Sub Constituency turnout report tests
self.assertContains(response, "<h3>Turn Out Report</h3>")
self.assertContains(response, "<th>Sub Constituency Name</th>")
self.assertContains(response, "<th>Total number of voters</th>")
self.assertContains(response, "<th>Number of voters voted</th>")
self.assertContains(response, "<th>Male voters</th>")
self.assertContains(response, "<th>Female voters</th>")
self.assertContains(response, "<th>Turnout percentage</th>")
self.assertContains(
response,
f'<td>{turnout_report["name"]}</td>')
self.assertContains(
response,
f'<td>{turnout_report["number_of_voters_voted"]}</td>')
self.assertContains(
response,
str('<td>'
f'{turnout_report["total_number_of_registrants"]}'
'</td>'))
self.assertContains(
response,
str('<td>'
f'{turnout_report["total_number_of_ballots_used"]}'
'</td>'))
self.assertContains(
response,
f'<td>{turnout_report["male_voters"]}</td>')
self.assertContains(
response,
f'<td>{turnout_report["female_voters"]}</td>')
self.assertContains(
response,
f'<td>{turnout_report["turnout_percentage"]} %</td>')
votes_summary_report =\
administrative_areas_reports.generate_votes_summary_report(
self.tally.id,
'result_form__center__sub_constituency__code')[0]
# Sub Constituency votes summary report tests
self.assertContains(response, "<h3>Votes Summary Report</h3>")
self.assertContains(response, "<th>Sub Constituency Name</th>")
self.assertContains(response, "<th>Total number of valid votes</th>")
self.assertContains(response, "<th>Total number of invalid votes</th>")
self.assertContains(
response, "<th>Total number of cancelled votes</th>")
self.assertContains(
response,
f'<td>{votes_summary_report["name"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_valid_votes"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_invalid_votes"]}</td>')
self.assertContains(
response,
f'<td>{votes_summary_report["number_cancelled_ballots"]}</td>')
| [
"[email protected]"
]
| |
e35702ff865d77d881bb1f8c662a0694bcae1d85 | 027bdfb0f5dd6e7fe86189324a2c7ebd3a1ebea9 | /hydrus/client/db/ClientDBMappingsCacheSpecificStorage.py | 83ba6be205cd310a23f5eb700d6bfbe24c4fb7c0 | [
"WTFPL"
]
| permissive | pianomanx/hydrus | 5299a1bcc383760b7ed349e047467f6ac8fa6a43 | 368309645f85ecff832c0a968b3492bf582cdad5 | refs/heads/master | 2023-09-02T14:19:42.516186 | 2023-08-30T21:00:53 | 2023-08-30T21:00:53 | 90,190,997 | 0 | 0 | NOASSERTION | 2023-09-14T09:10:58 | 2017-05-03T20:33:50 | Python | UTF-8 | Python | false | false | 29,320 | py | import collections
import itertools
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDBBase
from hydrus.core import HydrusLists
from hydrus.core import HydrusTime
from hydrus.client.db import ClientDBFilesStorage
from hydrus.client.db import ClientDBMaintenance
from hydrus.client.db import ClientDBMappingsCacheSpecificDisplay
from hydrus.client.db import ClientDBMappingsCounts
from hydrus.client.db import ClientDBMappingsCountsUpdate
from hydrus.client.db import ClientDBMappingsStorage
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
from hydrus.client.metadata import ClientTags
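# FilteredHashesGenerator and FilteredMappingsGenerator below wrap a precomputed map of
# file_service_id -> hash_ids known to that file domain and filter incoming hash_ids or
# ( tag_id, hash_ids ) mappings down to the rows each specific file service cache needs.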
class FilteredHashesGenerator( object ):
def __init__( self, file_service_ids_to_valid_hash_ids ):
self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
def GetHashes( self, file_service_id, hash_ids ):
return self._file_service_ids_to_valid_hash_ids[ file_service_id ].intersection( hash_ids )
def IterateHashes( self, hash_ids ):
for ( file_service_id, valid_hash_ids ) in self._file_service_ids_to_valid_hash_ids.items():
if len( valid_hash_ids ) == 0:
continue
filtered_hash_ids = valid_hash_ids.intersection( hash_ids )
if len( filtered_hash_ids ) == 0:
continue
yield ( file_service_id, filtered_hash_ids )
class FilteredMappingsGenerator( object ):
def __init__( self, file_service_ids_to_valid_hash_ids, mappings_ids ):
self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
self._mappings_ids = mappings_ids
def IterateMappings( self, file_service_id ):
valid_hash_ids = self._file_service_ids_to_valid_hash_ids[ file_service_id ]
if len( valid_hash_ids ) > 0:
for ( tag_id, hash_ids ) in self._mappings_ids:
hash_ids = valid_hash_ids.intersection( hash_ids )
if len( hash_ids ) == 0:
continue
yield ( tag_id, hash_ids )
class ClientDBMappingsCacheSpecificStorage( ClientDBModule.ClientDBModule ):
CAN_REPOPULATE_ALL_MISSING_DATA = True
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_db_maintenance: ClientDBMaintenance.ClientDBMaintenance, modules_mappings_counts: ClientDBMappingsCounts.ClientDBMappingsCounts, modules_mappings_counts_update: ClientDBMappingsCountsUpdate.ClientDBMappingsCountsUpdate, modules_files_storage: ClientDBFilesStorage.ClientDBFilesStorage, modules_mappings_cache_specific_display: ClientDBMappingsCacheSpecificDisplay.ClientDBMappingsCacheSpecificDisplay ):
self.modules_services = modules_services
self.modules_db_maintenance = modules_db_maintenance
self.modules_mappings_counts = modules_mappings_counts
self.modules_mappings_counts_update = modules_mappings_counts_update
self.modules_files_storage = modules_files_storage
self.modules_mappings_cache_specific_display = modules_mappings_cache_specific_display
self._missing_tag_service_pairs = set()
ClientDBModule.ClientDBModule.__init__( self, 'client specific display mappings cache', cursor )
def _GetServiceIndexGenerationDictSingle( self, file_service_id, tag_service_id ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
version = 486 if file_service_id == self.modules_services.combined_local_media_service_id else 400
index_generation_dict = {}
index_generation_dict[ cache_current_mappings_table_name ] = [
( [ 'tag_id', 'hash_id' ], True, version )
]
index_generation_dict[ cache_deleted_mappings_table_name ] = [
( [ 'tag_id', 'hash_id' ], True, version )
]
index_generation_dict[ cache_pending_mappings_table_name ] = [
( [ 'tag_id', 'hash_id' ], True, version )
]
return index_generation_dict
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
index_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
for file_service_id in file_service_ids:
single_index_dict = self._GetServiceIndexGenerationDictSingle( file_service_id, tag_service_id )
index_dict.update( single_index_dict )
return index_dict
def _GetServiceTableGenerationDictSingle( self, file_service_id, tag_service_id ):
table_dict = {}
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
version = 486 if file_service_id == self.modules_services.combined_local_media_service_id else 400
table_dict[ cache_current_mappings_table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;', version )
table_dict[ cache_deleted_mappings_table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;', version )
table_dict[ cache_pending_mappings_table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;', version )
return table_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
table_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
for file_service_id in file_service_ids:
single_table_dict = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
table_dict.update( single_table_dict )
return table_dict
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
tag_service_ids = list( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
for tag_service_id in tag_service_ids:
for file_service_id in file_service_ids:
table_dict_for_this = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
table_names_for_this = set( table_dict_for_this.keys() )
if not table_names_for_this.isdisjoint( table_names ):
self._missing_tag_service_pairs.add( ( file_service_id, tag_service_id ) )
def AddFiles( self, file_service_id, tag_service_id, hash_ids, hash_ids_table_name ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
# deleted don't have a/c counts to update, so we can do it all in one go here
self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id, tag_id ) SELECT hash_id, tag_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( cache_deleted_mappings_table_name, hash_ids_table_name, deleted_mappings_table_name ) )
# temp hashes to mappings
current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, current_mappings_table_name ) ).fetchall()
current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
# temp hashes to mappings
pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, pending_mappings_table_name ) ).fetchall()
pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
all_ids_seen = set( current_mapping_ids_dict.keys() )
all_ids_seen.update( pending_mapping_ids_dict.keys() )
counts_cache_changes = []
for tag_id in all_ids_seen:
current_hash_ids = current_mapping_ids_dict[ tag_id ]
current_delta = len( current_hash_ids )
if current_delta > 0:
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in current_hash_ids ) )
current_delta = self._GetRowCount()
#
pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
pending_delta = len( pending_hash_ids )
if pending_delta > 0:
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in pending_hash_ids ) )
pending_delta = self._GetRowCount()
#
if current_delta > 0 or pending_delta > 0:
counts_cache_changes.append( ( tag_id, current_delta, pending_delta ) )
if len( counts_cache_changes ) > 0:
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def AddMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
# we have to interleave this into the iterator so that if two siblings with the same ideal are pend->currented at once, we remain logic consistent for soletag lookups!
self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_pending_rescinded = self._GetRowCount()
#
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_current_inserted = self._GetRowCount()
#
self._ExecuteMany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
if num_current_inserted > 0:
counts_cache_changes = [ ( tag_id, num_current_inserted, 0 ) ]
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
if num_pending_rescinded > 0:
counts_cache_changes = [ ( tag_id, 0, num_pending_rescinded ) ]
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
self.modules_mappings_cache_specific_display.AddMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
def Clear( self, file_service_id, tag_service_id, keep_pending = False ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._Execute( 'DELETE FROM {};'.format( cache_current_mappings_table_name ) )
self._Execute( 'DELETE FROM {};'.format( cache_deleted_mappings_table_name ) )
if not keep_pending:
self._Execute( 'DELETE FROM {};'.format( cache_pending_mappings_table_name ) )
self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_pending = keep_pending )
self.modules_mappings_cache_specific_display.Clear( file_service_id, tag_service_id, keep_pending = keep_pending )
def CreateTables( self, file_service_id, tag_service_id ):
table_generation_dict = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._CreateTable( create_query_without_name, table_name )
self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
def Drop( self, file_service_id, tag_service_id ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self.modules_db_maintenance.DeferredDropTable( cache_current_mappings_table_name )
self.modules_db_maintenance.DeferredDropTable( cache_deleted_mappings_table_name )
self.modules_db_maintenance.DeferredDropTable( cache_pending_mappings_table_name )
self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
self.modules_mappings_cache_specific_display.Drop( file_service_id, tag_service_id )
def DeleteFiles( self, file_service_id, tag_service_id, hash_ids, hash_id_table_name ):
self.modules_mappings_cache_specific_display.DeleteFiles( file_service_id, tag_service_id, hash_ids, hash_id_table_name )
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
# temp hashes to mappings
deleted_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_deleted_mappings_table_name ) ).fetchall()
if len( deleted_mapping_ids_raw ) > 0:
self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( cache_deleted_mappings_table_name ), deleted_mapping_ids_raw )
# temp hashes to mappings
current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_current_mappings_table_name ) ).fetchall()
current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
# temp hashes to mappings
pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_pending_mappings_table_name ) ).fetchall()
pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
all_ids_seen = set( current_mapping_ids_dict.keys() )
all_ids_seen.update( pending_mapping_ids_dict.keys() )
counts_cache_changes = []
for tag_id in all_ids_seen:
current_hash_ids = current_mapping_ids_dict[ tag_id ]
num_current = len( current_hash_ids )
#
pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
num_pending = len( pending_hash_ids )
counts_cache_changes.append( ( tag_id, num_current, num_pending ) )
self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
if len( counts_cache_changes ) > 0:
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def DeleteMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self.modules_mappings_cache_specific_display.DeleteMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_deleted = self._GetRowCount()
#
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
if num_deleted > 0:
counts_cache_changes = [ ( tag_id, num_deleted, 0 ) ]
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def Generate( self, file_service_id, tag_service_id ):
self.CreateTables( file_service_id, tag_service_id )
#
hash_ids = self.modules_files_storage.GetCurrentHashIdsList( file_service_id )
BLOCK_SIZE = 10000
for ( i, block_of_hash_ids ) in enumerate( HydrusLists.SplitListIntoChunks( hash_ids, BLOCK_SIZE ) ):
with self._MakeTemporaryIntegerTable( block_of_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
self.AddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
index_generation_dict = self._GetServiceIndexGenerationDictSingle( file_service_id, tag_service_id )
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
self._CreateIndex( table_name, columns, unique = unique )
self.modules_db_maintenance.TouchAnalyzeNewTables()
self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = True )
def GetFilteredHashesGenerator( self, file_service_ids, tag_service_id, hash_ids ) -> FilteredHashesGenerator:
file_service_ids_to_valid_hash_ids = collections.defaultdict( set )
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_table_name:
for file_service_id in file_service_ids:
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
valid_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
file_service_ids_to_valid_hash_ids[ file_service_id ] = valid_hash_ids
return FilteredHashesGenerator( file_service_ids_to_valid_hash_ids )
def GetFilteredMappingsGenerator( self, file_service_ids, tag_service_id, mappings_ids ) -> FilteredMappingsGenerator:
all_hash_ids = set( itertools.chain.from_iterable( ( hash_ids for ( tag_id, hash_ids ) in mappings_ids ) ) )
file_service_ids_to_valid_hash_ids = collections.defaultdict( set )
with self._MakeTemporaryIntegerTable( all_hash_ids, 'hash_id' ) as temp_table_name:
for file_service_id in file_service_ids:
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
valid_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
file_service_ids_to_valid_hash_ids[ file_service_id ] = valid_hash_ids
return FilteredMappingsGenerator( file_service_ids_to_valid_hash_ids, mappings_ids )
def GetMissingServicePairs( self ):
return self._missing_tag_service_pairs
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
if content_type == HC.CONTENT_TYPE_TAG:
table_dict = self._GetServicesTableGenerationDict()
for table_name in table_dict.keys():
tables_and_columns.append( ( table_name, 'tag_id' ) )
elif content_type == HC.CONTENT_TYPE_HASH:
table_dict = self._GetServicesTableGenerationDict()
for table_name in table_dict.keys():
tables_and_columns.append( ( table_name, 'hash_id' ) )
return tables_and_columns
def PendMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_added = self._GetRowCount()
if num_added > 0:
counts_cache_changes = [ ( tag_id, 0, num_added ) ]
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
self.modules_mappings_cache_specific_display.PendMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
def RegeneratePending( self, file_service_id, tag_service_id, status_hook = None ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
if status_hook is not None:
message = 'clearing old specific data'
status_hook( message )
all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_current = True )
self._Execute( 'DELETE FROM {};'.format( cache_pending_mappings_table_name ) )
counts_cache_changes = []
num_to_do = len( all_pending_storage_tag_ids )
select_table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, pending_mappings_table_name, HC.CONTENT_STATUS_CURRENT )
for ( i, storage_tag_id ) in enumerate( all_pending_storage_tag_ids ):
if i % 100 == 0 and status_hook is not None:
message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
status_hook( message )
self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, hash_id ) SELECT tag_id, hash_id FROM {} WHERE tag_id = ?;'.format( cache_pending_mappings_table_name, select_table_join ), ( storage_tag_id, ) )
pending_delta = self._GetRowCount()
counts_cache_changes.append( ( storage_tag_id, 0, pending_delta ) )
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
self.modules_mappings_cache_specific_display.RegeneratePending( file_service_id, tag_service_id, status_hook = status_hook )
def RescindPendingMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
ac_counts = collections.Counter()
self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_deleted = self._GetRowCount()
if num_deleted > 0:
counts_cache_changes = [ ( tag_id, 0, num_deleted ) ]
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
| [
"[email protected]"
]
| |
cfe01345e37aadfec5a5a2ccb5e0ad6c4a9df927 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/doctor/migrations/0192_auto_20190125_1514.py | 3779ca4d7f242aa5bd1e5ad30a90d32209c5bc7d | []
| no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.0.5 on 2019-01-25 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('doctor', '0191_auto_20190124_1845'),
]
operations = [
migrations.AlterField(
model_name='cancellationreason',
name='type',
field=models.PositiveSmallIntegerField(blank=True, default=None, null=True),
),
]
| [
"[email protected]"
]
| |
4ae82fbb54695b12dbf2f6d5842e6919c8a8330b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/1111.py | a46d75a4bf3135788b1b71ced5eaa6713ed67828 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # Program to solve C. Recycled Numbers
def is_recycled_pair(a, b, call):
astr = str(a)
bstr = str(b)
if len(astr) != len(bstr) or len(astr) == 1:
return False
for i in range(1, len(astr)):
if astr == (bstr[len(astr) - i:] + bstr[:len(astr) - i]):
return True
if call == 1:
return is_recycled_pair(b, a, 2)
else:
return False
filename = "in.txt"
infile = open(filename, 'r')
outfile = open("output.txt", 'w')
first_line = True
case = 0
for line in infile:
if first_line:
first_line = False
continue
case += 1
start = int(line.split(" ")[0])
end = int(line.split(" ")[1])
if end <= start:
outfile.write("Case #" + str(case) + ": 0" + "\n")
continue
pair_count = 0
for n1 in range(start, end):
for n2 in range(n1 + 1, end + 1):
if is_recycled_pair(n1, n2, 1):
pair_count += 1
outfile.write("Case #" + str(case) + ": " + str(pair_count) + "\n")
infile.close()
outfile.close() | [
"[email protected]"
]
| |
8e5e8ca0317d169947d49cf752033de72b169638 | 0f4823e4e8dcedf64b0061c9f02d2bf4b410c0e0 | /autotest/t000_testscripts.py | 4394523e616b8f2de32d6d4ce65a26d645f47bda | [
"BSD-3-Clause"
]
| permissive | MJKnowling/flopy | cfa4383c8c834fbc57341511621d3f2401726224 | f480ff304e5728ccaa5e663d3fa77ec025cb0ba8 | refs/heads/master | 2021-09-20T23:57:13.032896 | 2017-12-01T18:57:09 | 2017-12-01T18:57:09 | 113,387,250 | 0 | 0 | null | 2017-12-07T01:33:03 | 2017-12-07T01:33:02 | null | UTF-8 | Python | false | false | 1,809 | py | # Remove the temp directory and then create a fresh one
from __future__ import print_function
import os
import sys
import shutil
exclude = ['flopy_swi2_ex2.py', 'flopy_swi2_ex5.py']
for arg in sys.argv:
if arg.lower() == '--all':
exclude = []
sdir = os.path.join('..', 'examples', 'scripts')
# make working directories
testdir = os.path.join('.', 'temp', 'scripts')
if os.path.isdir(testdir):
shutil.rmtree(testdir)
os.mkdir(testdir)
# add testdir to python path
sys.path.append(testdir)
def copy_scripts():
files = [f for f in os.listdir(sdir) if f.endswith('.py')]
# exclude unwanted files
for e in exclude:
if e in files:
files.remove(e)
# copy files
for fn in files:
pth = os.path.join(sdir, fn)
opth = os.path.join(testdir, fn)
# copy script
print('copying {} from {} to {}'.format(fn, sdir, testdir))
shutil.copyfile(pth, opth)
return files
def import_from(mod, name):
mod = __import__(mod)
main = getattr(mod, name)
return main
def run_scripts(fn):
# import run function from scripts
s = os.path.splitext(fn)[0]
run = import_from(s, 'run')
# change to working directory
opth = os.getcwd()
print('changing to working directory "{}"'.format(testdir))
os.chdir(testdir)
# run the script
ival = run()
# change back to starting directory
print('changing back to starting directory "{}"'.format(opth))
os.chdir(opth)
# make sure script ran successfully
assert ival == 0, 'could not run {}'.format(fn)
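# Generator-style test entry point: each yielded (callable, argument) pair is
# collected by a nose-style test runner as its own test case, one per script.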
def test_notebooks():
files = copy_scripts()
for fn in files:
yield run_scripts, fn
if __name__ == '__main__':
files = copy_scripts()
print(files)
for fn in files:
run_scripts(fn)
| [
"[email protected]"
]
| |
9978938d6c89dfc4cbef5d0b474f6ea5d568ee40 | 04fb46ffbf635ca5090b860e39098c366a3a84e4 | /fpga/mqnic/fb2CG/fpga_100g/tb/fpga_core/test_fpga_core.py | f115c4c4cb3c16eeba80e3ad6828388750820cf1 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
]
| permissive | yangcc2019/corundum | 46b7c7126973976617e065373bb7666df71cdc3c | 7c8abe261b2ec3e653da7bc881f769668a231bde | refs/heads/master | 2023-02-24T11:17:47.471315 | 2021-02-02T05:55:07 | 2021-02-02T05:55:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,033 | py | """
Copyright 2020, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice
from cocotbext.axi import AxiStreamSource, AxiStreamSink
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
class TB(object):
def __init__(self, dut):
self.dut = dut
self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePlusPcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=16,
user_clk_frequency=250e6,
alignment="dword",
cq_cc_straddle=False,
rq_rc_straddle=False,
rc_4tlp_straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk_250mhz,
user_reset=dut.rst_250mhz,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_entity=dut,
rq_name="m_axis_rq",
pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
# pcie_rq_tag0
# pcie_rq_tag1
# pcie_rq_tag_av
# pcie_rq_tag_vld0
# pcie_rq_tag_vld1
# Requester Completion Interface
rc_entity=dut,
rc_name="s_axis_rc",
# Completer reQuest Interface
cq_entity=dut,
cq_name="s_axis_cq",
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_entity=dut,
cc_name="m_axis_cc",
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
# cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
# cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver(self.rc)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True)
# Ethernet
cocotb.fork(Clock(dut.qsfp_0_rx_clk, 3.102, units="ns").start())
self.qsfp_0_source = AxiStreamSource(dut, "qsfp_0_rx_axis", dut.qsfp_0_rx_clk, dut.qsfp_0_rx_rst)
cocotb.fork(Clock(dut.qsfp_0_tx_clk, 3.102, units="ns").start())
self.qsfp_0_sink = AxiStreamSink(dut, "qsfp_0_tx_axis", dut.qsfp_0_tx_clk, dut.qsfp_0_tx_rst)
cocotb.fork(Clock(dut.qsfp_1_rx_clk, 3.102, units="ns").start())
self.qsfp_1_source = AxiStreamSource(dut, "qsfp_1_rx_axis", dut.qsfp_1_rx_clk, dut.qsfp_1_rx_rst)
cocotb.fork(Clock(dut.qsfp_1_tx_clk, 3.102, units="ns").start())
self.qsfp_1_sink = AxiStreamSink(dut, "qsfp_1_tx_axis", dut.qsfp_1_tx_clk, dut.qsfp_1_tx_rst)
dut.qsfp_0_i2c_scl_i.setimmediatevalue(1)
dut.qsfp_0_i2c_sda_i.setimmediatevalue(1)
dut.qsfp_0_intr_n.setimmediatevalue(1)
dut.qsfp_0_mod_prsnt_n.setimmediatevalue(0)
dut.qsfp_1_i2c_scl_i.setimmediatevalue(1)
dut.qsfp_1_i2c_sda_i.setimmediatevalue(1)
dut.qsfp_1_intr_n.setimmediatevalue(1)
dut.qsfp_1_mod_prsnt_n.setimmediatevalue(0)
dut.qspi_dq_i.setimmediatevalue(0)
dut.pps_in.setimmediatevalue(0)
self.loopback_enable = False
cocotb.fork(self._run_loopback())
async def init(self):
self.dut.qsfp_0_rx_rst.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp_0_rx_rst.setimmediatevalue(1)
self.dut.qsfp_0_tx_rst.setimmediatevalue(1)
self.dut.qsfp_1_rx_rst.setimmediatevalue(1)
self.dut.qsfp_1_tx_rst.setimmediatevalue(1)
await FallingEdge(self.dut.rst_250mhz)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.qsfp_0_rx_rst.setimmediatevalue(0)
self.dut.qsfp_0_tx_rst.setimmediatevalue(0)
self.dut.qsfp_1_rx_rst.setimmediatevalue(0)
self.dut.qsfp_1_tx_rst.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
while True:
await RisingEdge(self.dut.clk_250mhz)
if self.loopback_enable:
if not self.qsfp_0_sink.empty():
await self.qsfp_0_source.send(await self.qsfp_0_sink.recv())
if not self.qsfp_1_sink.empty():
await self.qsfp_1_source.send(await self.qsfp_1_sink.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_dev(tb.dev.functions[0].pcie_id)
await tb.driver.interfaces[0].open()
# await driver.interfaces[1].open()
# enable queues
tb.log.info("Enable queues")
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003)
# wait for all writes to complete
await tb.rc.mem_read(tb.driver.hw_addr, 4)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
data = bytearray([x % 256 for x in range(1024)])
await tb.driver.interfaces[0].start_xmit(data, 0)
pkt = await tb.qsfp_0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp_0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
# await tb.driver.interfaces[1].start_xmit(data, 0)
# pkt = await tb.qsfp_1_0_sink.recv()
# tb.log.info("Packet: %s", pkt)
# await tb.qsfp_1_0_source.send(pkt)
# pkt = await tb.driver.interfaces[1].recv()
# tb.log.info("Packet: %s", pkt)
# assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
pkt = await tb.qsfp_0_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.qsfp_0_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Jumbo frames")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(9014)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await RisingEdge(dut.clk_250mhz)
await RisingEdge(dut.clk_250mhz)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_port.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "tdma_scheduler.v"),
os.path.join(rtl_dir, "common", "tdma_ber.v"),
os.path.join(rtl_dir, "common", "tdma_ber_ch.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(eth_rtl_dir, "ptp_ts_extract.v"),
os.path.join(axi_rtl_dir, "axil_interconnect.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pcie_tag_manager.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 512
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81
parameters['RQ_SEQ_NUM_WIDTH'] = 6
parameters['BAR0_APERTURE'] = 24
parameters['AXIS_ETH_DATA_WIDTH'] = 512
parameters['AXIS_ETH_KEEP_WIDTH'] = parameters['AXIS_ETH_DATA_WIDTH'] // 8
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| [
"[email protected]"
]
| |
eb239b21952da625554fc6c3c1b389fd1c3d1bfe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03293/s450382229.py | 6fbb1ed548a962cfbf8b0a5267d97027111aea93 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | S,T=input(),input()
for i in range(len(T)):
if S==T:print('Yes');exit()
S=S[-1]+S[0:-1]
print('No') | [
"[email protected]"
]
| |
95b54f2914a61f9a045c2fd26d9d46b9767a42c4 | 0b953c73458679beeef3b95f366601c834cff9b4 | /hunter/longest palindrome substring within string.py | 9f907b18b130617c943cd267d9545d83c25ece09 | []
| no_license | Sravaniram/Python-Programming | 41531de40e547f0f461e77b88e4c0d562faa041c | f6f2a4e3a6274ecab2795062af8899c2a06c9dc1 | refs/heads/master | 2020-04-11T12:49:18.677561 | 2018-06-04T18:04:13 | 2018-06-04T18:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | a=raw_input()
l=[]
m=[]
for i in range(0,len(a)):
for j in range(i,len(a)):
z=a[i:j+1]
y=z[::-1]
if z==y:
l.append(z)
m.append(len(z))
y=max(m)
for i in range(0,len(a)):
if m[i]==y:
print l[i]
break
| [
"[email protected]"
]
| |
aaa1bebc04e41b15d7bbd59b3e874ecfad08e1e6 | ebde1fadfbe336fa52bc20c8a2f74de8d1d90cf3 | /src/moca_modules/moca_share/__init__.py | 53ea1bfc999d299bbc568895421ca67f221548ec | [
"MIT"
]
| permissive | el-ideal-ideas/MocaTwitterUtils | be2481ce9eb0f9e53e8e0bd54b1b265c80e4f959 | 544a260600ade1b8cd4e0a2d2967c2fb6a8f38d3 | refs/heads/master | 2023-02-18T23:27:31.056121 | 2021-01-23T07:41:16 | 2021-01-23T07:41:16 | 321,014,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # -- Imports --------------------------------------------------------------------------
from .MocaMultiProcessLock import MocaMultiProcessLock
from .MocaSharedMemory import MocaSharedMemory
# -------------------------------------------------------------------------- Imports --
"""
This module can share data between processes.
Requirements
------------
None
""" | [
"[email protected]"
]
| |
81b6659ce41232ce1546045cddc849edadb44f22 | 3a2af7b4b801d9ba8d78713dcd1ed57ee35c0992 | /zerver/webhooks/errbit/view.py | a47ccae2f0fc9f5a3b1841a1b5be747b0a7ea1b3 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
]
| permissive | timabbott/zulip | 2b69bd3bb63539adbfc4c732a3ff9d52657f40ac | 42f239915526180a1a0cd6c3761c0efcd13ffe6f | refs/heads/master | 2023-08-30T21:45:39.197724 | 2020-02-13T23:09:22 | 2020-06-25T21:46:33 | 43,171,533 | 6 | 9 | Apache-2.0 | 2020-02-24T20:12:52 | 2015-09-25T19:34:16 | Python | UTF-8 | Python | false | false | 1,333 | py | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
ERRBIT_TOPIC_TEMPLATE = '{project_name}'
ERRBIT_MESSAGE_TEMPLATE = '[{error_class}]({error_url}): "{error_message}" occurred.'
@api_key_only_webhook_view('Errbit')
@has_request_variables
def api_errbit_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
subject = get_subject(payload)
body = get_body(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject(payload: Dict[str, Any]) -> str:
project = payload['problem']['app_name'] + ' / ' + payload['problem']['environment']
return ERRBIT_TOPIC_TEMPLATE.format(project_name=project)
def get_body(payload: Dict[str, Any]) -> str:
data = {
'error_url': payload['problem']['url'],
'error_class': payload['problem']['error_class'],
'error_message': payload['problem']['message'],
}
return ERRBIT_MESSAGE_TEMPLATE.format(**data)
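# Illustrative example (payload shape inferred from the accessors above, values
# made up): for
#   payload = {"problem": {"app_name": "ExampleApp", "environment": "production",
#                          "url": "https://errbit.example.com/problems/42",
#                          "error_class": "RuntimeError", "message": "boom"}}
# get_subject(payload) returns "ExampleApp / production" and get_body(payload)
# returns '[RuntimeError](https://errbit.example.com/problems/42): "boom" occurred.'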
| [
"[email protected]"
]
| |
b157b3943a5da0075b79e5476fd9dc13cb5f888d | f0e25779a563c2d570cbc22687c614565501130a | /Think_Python/rotate.py | 88a2a43db71c667c9424a08799bd16968e7efbd5 | []
| no_license | XyK0907/for_work | 8dcae9026f6f25708c14531a83a6593c77b38296 | 85f71621c54f6b0029f3a2746f022f89dd7419d9 | refs/heads/master | 2023-04-25T04:18:44.615982 | 2021-05-15T12:10:26 | 2021-05-15T12:10:26 | 293,845,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import string
def rotate_letter(letter, n):
"""Rotates a letter by n places. Does not change other chars.
letter: single-letter string
n: int
Returns: single-letter string
"""
if letter.isupper():
start = ord('A')
elif letter.islower():
start = ord('a')
else:
return letter
c = ord(letter) - start
i = (c + n) % 26 + start
return chr(i)
def rotate_word(word, n):
"""Rotates a word by n places.
word: string
n: integer
Returns: string
"""
res = ''
for letter in word:
res += rotate_letter(letter, n)
return res
if __name__ == '__main__':
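    # Expected output of the calls below: "jolly", "cubed", "bunny"
    # (each letter shifted 7, -10 and 9 places through the alphabet).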
print(rotate_word('cheer', 7))
print(rotate_word('melon', -10))
print(rotate_word('sleep', 9)) | [
"[email protected]"
]
| |
77ab9cecf9571229a858bc319ec4530650f8d96c | 4a48593a04284ef997f377abee8db61d6332c322 | /python/opencv/opencv_2/gui/opencv_with_tkinter.py | c38c3d8a121d82026b7644085f0fe74574998ae3 | [
"MIT"
]
| permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 3,408 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Trackbar widget.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_trackbar/py_trackbar.html#trackbar
WARNING: Tkinter doesn't work if it's run outside the main thread!
See: http://stackoverflow.com/questions/10556479/running-a-tkinter-form-in-a-separate-thread
"Tkinter isn't thread safe, and the general consensus is that Tkinter
doesn't work in a non-main thread. If you rewrite your code so that Tkinter
runs in the main thread, you can have your workers run in other threads."
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import Tkinter as tk
import threading
def trackbar1_cb(x):
pass
def trackbar2_cb(x):
pass
#def scale_cb(ev=None):
# print(scale.get())
def main():
# Parse the programm options (get the path of the image file to read) #####
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--cameraid", "-i", help="The camera ID number (default: 0)", type=int, default=0, metavar="INTEGER")
args = parser.parse_args()
device_number = args.cameraid
# TkInter #################################################################
root = tk.Tk()
root.geometry("500x75") # Set the size of the "root" window
# See: http://effbot.org/tkinterbook/scale.htm
scale = tk.Scale(root, from_=0, to=255, orient=tk.HORIZONTAL)
#scale = tk.Scale(root, from_=0, to=255, orient=tk.HORIZONTAL, command=scale_cb)
scale.pack(fill=tk.X, expand=1)
# OpenCV ##################################################################
video_capture = cv.VideoCapture(device_number)
# Create a window
window_name = "Threshold Bin"
cv.namedWindow(window_name)
print("Press q to quit.")
def opencv_main_loop():
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is an numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to Grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Threshold the Grayscale image: dst_i = (src_i > threshold_value) ? max_val : 0
threshold_value = scale.get()
max_val = 255
ret, img_threshold_bin = cv.threshold(img_gray, threshold_value, max_val, cv.THRESH_BINARY)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (orignal)', img_bgr)
# Display the resulting frames (Threshold)
cv.imshow(window_name, img_threshold_bin)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
# Run the OpenCV main loop in a separate thread
thread_cv = threading.Thread(target=opencv_main_loop)
thread_cv.start()
# Run the tkinter main loop
root.mainloop()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
05569309e30bae8fa01d77141b06eb6f922b24e6 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/os_test.py | cb10509f1d7cdb4b47c62f144aadf5f27e252502 | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 673 | py | #
# currently broken:
# import os.path
import os
r1 = os.urandom(8)
r2 = os.urandom(8)
print len(r1), len(r2), type(r1), type(r2), r1 == r2
print type(os.stat("/dev/null"))
print os.path.expanduser("~") == os.environ["HOME"]
print os.path.isfile("/dev/null")
print os.path.isfile("/should_not_exist!")
e = OSError(1, 2, 3)
print e
print e.errno
print e.strerror
print e.filename
print OSError(1, 2).filename
try:
os.execvp("aoeuaoeu", ['aoeuaoeu'])
except OSError, e:
print e
# Changes to os.environ should show up in subprocesses:
import subprocess
env = os.environ
env["PYTHONPATH"] = "."
subprocess.check_call("echo PYTHONPATH is $PYTHONPATH", shell=1)
| [
"[email protected]"
]
| |
4b12cb36cca7db69add9afd812f75a2819c4b7f7 | 2ec26d004a653c0576594e48ac13dd71f539b30a | /crikey/conditional_audio/fruit_binned_slow_mse/fruitspeecher_binned_slow_mse.py | c5d16460509e10da90e3e3c6c64df5e5c6b1c737 | []
| no_license | kastnerkyle/research_megarepo | 6aca5b2c3b2413e0def1093b23f2826e3e7e5e97 | ab182667650fd59b99f75d4b599d7ace77a3f30b | refs/heads/master | 2021-01-17T20:31:52.250050 | 2016-12-27T01:28:54 | 2016-12-27T01:28:54 | 68,341,074 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 24,969 | py | import numpy as np
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from scipy.io import wavfile
import os
import sys
from kdllib import load_checkpoint, theano_one_hot, concatenate
from kdllib import fetch_fruitspeech_spectrogram, list_iterator
from kdllib import np_zeros, GRU, GRUFork, dense_to_one_hot
from kdllib import make_weights, make_biases, relu, run_loop
from kdllib import as_shared, adam, gradient_clipping
from kdllib import get_values_from_function, set_shared_variables_in_function
from kdllib import soundsc, categorical_crossentropy
from kdllib import sample_binomial, sigmoid
if __name__ == "__main__":
import argparse
speech = fetch_fruitspeech_spectrogram()
X = speech["data"]
y = speech["target"]
vocabulary = speech["vocabulary"]
vocabulary_size = speech["vocabulary_size"]
reconstruct = speech["reconstruct"]
fs = speech["sample_rate"]
X = np.array([x.astype(theano.config.floatX) for x in X])
y = np.array([yy.astype(theano.config.floatX) for yy in y])
minibatch_size = 1
n_epochs = 200 # Used way at the bottom in the training loop!
checkpoint_every_n = 10
cut_len = 41 # Used way at the bottom in the training loop!
random_state = np.random.RandomState(1999)
train_itr = list_iterator([X, y], minibatch_size, axis=1,
stop_index=105, randomize=True, make_mask=True)
valid_itr = list_iterator([X, y], minibatch_size, axis=1,
start_index=80, randomize=True, make_mask=True)
X_mb, X_mb_mask, c_mb, c_mb_mask = next(train_itr)
train_itr.reset()
n_hid = 256
att_size = 10
n_proj = 256
n_v_proj = 5
n_bins = 10
input_dim = X_mb.shape[-1]
n_pred_proj = 1
n_feats = X_mb.shape[-1]
n_chars = vocabulary_size
# n_components = 3
# n_density = 2 * n_out * n_components + n_components
desc = "Speech generation"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s', '--sample',
help='Sample from a checkpoint file',
default=None,
required=False)
parser.add_argument('-p', '--plot',
help='Plot training curves from a checkpoint file',
default=None,
required=False)
parser.add_argument('-w', '--write',
help='The string to write out (default first minibatch)',
default=None,
required=False)
def restricted_int(x):
if x is None:
# None makes it "auto" sample
return x
x = int(x)
if x < 1:
raise argparse.ArgumentTypeError("%r not range [1, inf]" % (x,))
return x
parser.add_argument('-sl', '--sample_length',
help='Number of steps to sample, default is automatic',
type=restricted_int,
default=None,
required=False)
parser.add_argument('-c', '--continue', dest="cont",
help='Continue training from another saved model',
default=None,
required=False)
args = parser.parse_args()
if args.plot is not None or args.sample is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if args.sample is not None:
checkpoint_file = args.sample
else:
checkpoint_file = args.plot
if not os.path.exists(checkpoint_file):
raise ValueError("Checkpoint file path %s" % checkpoint_file,
" does not exist!")
print(checkpoint_file)
checkpoint_dict = load_checkpoint(checkpoint_file)
train_costs = checkpoint_dict["train_costs"]
valid_costs = checkpoint_dict["valid_costs"]
plt.plot(train_costs)
plt.plot(valid_costs)
plt.savefig("costs.png")
X_mb, X_mb_mask, c_mb, c_mb_mask = next(valid_itr)
valid_itr.reset()
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
prev_kappa = np_zeros((minibatch_size, att_size))
prev_w = np_zeros((minibatch_size, n_chars))
if args.sample is not None:
predict_function = checkpoint_dict["predict_function"]
attention_function = checkpoint_dict["attention_function"]
sample_function = checkpoint_dict["sample_function"]
if args.write is not None:
sample_string = args.write
print("Sampling using sample string %s" % sample_string)
oh = dense_to_one_hot(
np.array([vocabulary[c] for c in sample_string]),
vocabulary_size)
c_mb = np.zeros(
(len(oh), minibatch_size, oh.shape[-1])).astype(c_mb.dtype)
c_mb[:len(oh), :, :] = oh[:, None, :]
c_mb = c_mb[:len(oh)]
c_mb_mask = np.ones_like(c_mb[:, :, 0])
if args.sample_length is None:
raise ValueError("NYI - use -sl or --sample_length ")
else:
fixed_steps = args.sample_length
completed = []
init_x = np.zeros_like(X_mb[0])
for i in range(fixed_steps):
rvals = sample_function(init_x, c_mb, c_mb_mask, prev_h1, prev_h2,
prev_h3, prev_kappa, prev_w)
sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h = rvals
completed.append(sampled)
# cheating sampling...
#init_x = X_mb[i]
init_x = sampled
prev_h1 = h1_s
prev_h2 = h2_s
prev_h3 = h3_s
prev_kappa = k_s
prev_w = w_s
cond = c_mb
print("Completed sampling after %i steps" % fixed_steps)
completed = np.array(completed).transpose(1, 0, 2)
rlookup = {v: k for k, v in vocabulary.items()}
all_strings = []
for yi in y:
ex_str = "".join([rlookup[c]
for c in np.argmax(yi, axis=1)])
all_strings.append(ex_str)
for i in range(len(completed)):
ex = completed[i]
ex_str = "".join([rlookup[c]
for c in np.argmax(cond[:, i], axis=1)])
s = "gen_%s_%i.wav" % (ex_str, i)
ii = reconstruct(ex)
wavfile.write(s, fs, soundsc(ii))
if ex_str in all_strings:
inds = [n for n, s in enumerate(all_strings)
if ex_str == s]
ind = inds[0]
it = reconstruct(X[ind])
s = "orig_%s_%i.wav" % (ex_str, i)
wavfile.write(s, fs, soundsc(it))
valid_itr.reset()
print("Sampling complete, exiting...")
sys.exit()
else:
print("No plotting arguments, starting training mode!")
X_sym = tensor.tensor3("X_sym")
X_sym.tag.test_value = X_mb
X_mask_sym = tensor.matrix("X_mask_sym")
X_mask_sym.tag.test_value = X_mb_mask
c_sym = tensor.tensor3("c_sym")
c_sym.tag.test_value = c_mb
c_mask_sym = tensor.matrix("c_mask_sym")
c_mask_sym.tag.test_value = c_mb_mask
init_h1 = tensor.matrix("init_h1")
init_h1.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h2 = tensor.matrix("init_h2")
init_h2.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h3 = tensor.matrix("init_h3")
init_h3.tag.test_value = np_zeros((minibatch_size, n_hid))
init_kappa = tensor.matrix("init_kappa")
init_kappa.tag.test_value = np_zeros((minibatch_size, att_size))
init_w = tensor.matrix("init_w")
init_w.tag.test_value = np_zeros((minibatch_size, n_chars))
params = []
biases = []
cell1 = GRU(input_dim, n_hid, random_state)
cell2 = GRU(n_hid, n_hid, random_state)
cell3 = GRU(n_hid, n_hid, random_state)
params += cell1.get_params()
params += cell2.get_params()
params += cell3.get_params()
inp_to_h1 = GRUFork(input_dim, n_hid, random_state)
inp_to_h2 = GRUFork(input_dim, n_hid, random_state)
inp_to_h3 = GRUFork(input_dim, n_hid, random_state)
att_to_h1 = GRUFork(n_chars, n_hid, random_state)
att_to_h2 = GRUFork(n_chars, n_hid, random_state)
att_to_h3 = GRUFork(n_chars, n_hid, random_state)
h1_to_h2 = GRUFork(n_hid, n_hid, random_state)
h1_to_h3 = GRUFork(n_hid, n_hid, random_state)
h2_to_h3 = GRUFork(n_hid, n_hid, random_state)
params += inp_to_h1.get_params()
params += inp_to_h2.get_params()
params += inp_to_h3.get_params()
params += att_to_h1.get_params()
params += att_to_h2.get_params()
params += att_to_h3.get_params()
params += h1_to_h2.get_params()
params += h1_to_h3.get_params()
params += h2_to_h3.get_params()
biases += inp_to_h1.get_biases()
biases += inp_to_h2.get_biases()
biases += inp_to_h3.get_biases()
biases += att_to_h1.get_biases()
biases += att_to_h2.get_biases()
biases += att_to_h3.get_biases()
biases += h1_to_h2.get_biases()
biases += h1_to_h3.get_biases()
biases += h2_to_h3.get_biases()
# 3 to include groundtruth, pixel RNN style
outs_to_v_h1 = GRUFork(3, n_v_proj, random_state)
params += outs_to_v_h1.get_params()
biases += outs_to_v_h1.get_biases()
v_cell1 = GRU(n_v_proj, n_v_proj, random_state)
params += v_cell1.get_params()
h1_to_att_a, h1_to_att_b, h1_to_att_k = make_weights(n_hid, 3 * [att_size],
random_state)
h1_to_outs, = make_weights(n_hid, [n_proj], random_state)
h2_to_outs, = make_weights(n_hid, [n_proj], random_state)
h3_to_outs, = make_weights(n_hid, [n_proj], random_state)
params += [h1_to_att_a, h1_to_att_b, h1_to_att_k]
params += [h1_to_outs, h2_to_outs, h3_to_outs]
pred_proj, = make_weights(n_v_proj, [n_pred_proj], random_state)
pred_b, = make_biases([n_pred_proj])
params += [pred_proj, pred_b]
biases += [pred_b]
inpt = X_sym[:-1]
target = X_sym[1:]
mask = X_mask_sym[1:]
context = c_sym * c_mask_sym.dimshuffle(0, 1, 'x')
inp_h1, inpgate_h1 = inp_to_h1.proj(inpt)
inp_h2, inpgate_h2 = inp_to_h2.proj(inpt)
inp_h3, inpgate_h3 = inp_to_h3.proj(inpt)
u = tensor.arange(c_sym.shape[0]).dimshuffle('x', 'x', 0)
u = tensor.cast(u, theano.config.floatX)
def calc_phi(k_t, a_t, b_t, u_c):
a_t = a_t.dimshuffle(0, 1, 'x')
b_t = b_t.dimshuffle(0, 1, 'x')
ss1 = (k_t.dimshuffle(0, 1, 'x') - u_c) ** 2
ss2 = -b_t * ss1
ss3 = a_t * tensor.exp(ss2)
ss4 = ss3.sum(axis=1)
return ss4
def step(xinp_h1_t, xgate_h1_t,
xinp_h2_t, xgate_h2_t,
xinp_h3_t, xgate_h3_t,
h1_tm1, h2_tm1, h3_tm1,
k_tm1, w_tm1, ctx):
attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)
h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
h1_tm1)
h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)
h1inp_h3, h1gate_h3 = h1_to_h3.proj(h1_t)
a_t = h1_t.dot(h1_to_att_a)
b_t = h1_t.dot(h1_to_att_b)
k_t = h1_t.dot(h1_to_att_k)
a_t = tensor.exp(a_t)
b_t = tensor.exp(b_t)
k_t = k_tm1 + tensor.exp(k_t)
ss4 = calc_phi(k_t, a_t, b_t, u)
ss5 = ss4.dimshuffle(0, 1, 'x')
ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
w_t = ss6.sum(axis=1)
attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
attinp_h3, attgate_h3 = att_to_h3.proj(w_t)
h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
h2inp_h3, h2gate_h3 = h2_to_h3.proj(h2_t)
h3_t = cell3.step(xinp_h3_t + h1inp_h3 + h2inp_h3 + attinp_h3,
xgate_h3_t + h1gate_h3 + h2gate_h3 + attgate_h3,
h3_tm1)
return h1_t, h2_t, h3_t, k_t, w_t
init_x = tensor.fmatrix()
init_x.tag.test_value = np_zeros((minibatch_size, n_feats)).astype(theano.config.floatX)
srng = RandomStreams(1999)
    # Used to calculate stopping heuristic from section 5.3
u_max = 0. * tensor.arange(c_sym.shape[0]) + c_sym.shape[0]
u_max = u_max.dimshuffle('x', 'x', 0)
u_max = tensor.cast(u_max, theano.config.floatX)
def sample_step(x_tm1, h1_tm1, h2_tm1, h3_tm1, k_tm1, w_tm1, ctx):
xinp_h1_t, xgate_h1_t = inp_to_h1.proj(x_tm1)
xinp_h2_t, xgate_h2_t = inp_to_h2.proj(x_tm1)
xinp_h3_t, xgate_h3_t = inp_to_h3.proj(x_tm1)
attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)
h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
h1_tm1)
h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)
h1inp_h3, h1gate_h3 = h1_to_h3.proj(h1_t)
a_t = h1_t.dot(h1_to_att_a)
b_t = h1_t.dot(h1_to_att_b)
k_t = h1_t.dot(h1_to_att_k)
a_t = tensor.exp(a_t)
b_t = tensor.exp(b_t)
k_t = k_tm1 + tensor.exp(k_t)
ss_t = calc_phi(k_t, a_t, b_t, u)
# calculate and return stopping criteria
sh_t = calc_phi(k_t, a_t, b_t, u_max)
ss5 = ss_t.dimshuffle(0, 1, 'x')
ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
w_t = ss6.sum(axis=1)
attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
attinp_h3, attgate_h3 = att_to_h3.proj(w_t)
h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
h2inp_h3, h2gate_h3 = h2_to_h3.proj(h2_t)
h3_t = cell3.step(xinp_h3_t + h1inp_h3 + h2inp_h3 + attinp_h3,
xgate_h3_t + h1gate_h3 + h2gate_h3 + attgate_h3,
h3_tm1)
out_t = h1_t.dot(h1_to_outs) + h2_t.dot(h2_to_outs) + h3_t.dot(
h3_to_outs)
theano.printing.Print("out_t.shape")(out_t.shape)
out_t_shape = out_t.shape
x_tm1_shuf = x_tm1.dimshuffle(1, 0, 'x')
vinp_t = out_t.dimshuffle(1, 0, 'x')
theano.printing.Print("x_tm1.shape")(x_tm1.shape)
theano.printing.Print("vinp_t.shape")(vinp_t.shape)
init_pred = tensor.zeros((vinp_t.shape[1],), dtype=theano.config.floatX)
init_hidden = tensor.zeros((x_tm1_shuf.shape[1], n_v_proj),
dtype=theano.config.floatX)
def sample_out_step(x_tm1_shuf, vinp_t, pred_fm1, v_h1_tm1):
j_t = concatenate((x_tm1_shuf, vinp_t,
pred_fm1.dimshuffle(0, 'x')),
axis=-1)
theano.printing.Print("j_t.shape")(j_t.shape)
vinp_h1_t, vgate_h1_t = outs_to_v_h1.proj(j_t)
v_h1_t = v_cell1.step(vinp_h1_t, vgate_h1_t, v_h1_tm1)
theano.printing.Print("v_h1_t.shape")(v_h1_t.shape)
pred_f = v_h1_t.dot(pred_proj) + pred_b
theano.printing.Print("pred_f.shape")(pred_f.shape)
return pred_f[:, 0], v_h1_t
r, isupdates = theano.scan(
fn=sample_out_step,
sequences=[x_tm1_shuf, vinp_t],
outputs_info=[init_pred, init_hidden])
(pred_t, v_h1_t) = r
theano.printing.Print("pred_t.shape")(pred_t.shape)
theano.printing.Print("v_h1_t.shape")(v_h1_t.shape)
#pred_t = sigmoid(pre_pred_t)
#x_t = sample_binomial(pred_t, n_bins, srng)
# MSE
x_t = pred_t
return x_t, h1_t, h2_t, h3_t, k_t, w_t, ss_t, sh_t, isupdates
(sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h, supdates) = sample_step(
init_x, init_h1, init_h2, init_h3, init_kappa, init_w, c_sym)
sampled = sampled.dimshuffle(1, 0)
theano.printing.Print("sampled.shape")(sampled.shape)
(h1, h2, h3, kappa, w), updates = theano.scan(
fn=step,
sequences=[inp_h1, inpgate_h1,
inp_h2, inpgate_h2,
inp_h3, inpgate_h3],
outputs_info=[init_h1, init_h2, init_h3, init_kappa, init_w],
non_sequences=[context])
outs = h1.dot(h1_to_outs) + h2.dot(h2_to_outs) + h3.dot(h3_to_outs)
outs_shape = outs.shape
theano.printing.Print("outs.shape")(outs.shape)
outs = outs.dimshuffle(2, 1, 0)
vinp = outs.reshape((outs_shape[2], -1, 1))
theano.printing.Print("vinp.shape")(vinp.shape)
shp = vinp.shape
shuff_inpt_shapes = inpt.shape
theano.printing.Print("inpt.shape")(inpt.shape)
shuff_inpt = inpt.dimshuffle(2, 1, 0)
theano.printing.Print("shuff_inpt.shape")(shuff_inpt.shape)
shuff_inpt = shuff_inpt.reshape((shuff_inpt_shapes[2],
shuff_inpt_shapes[1] * shuff_inpt_shapes[0],
1))
theano.printing.Print("shuff_inpt.shape")(shuff_inpt.shape)
theano.printing.Print("vinp.shape")(vinp.shape)
# input from previous time, pred from previous feature
"""
dimshuffle hacks and [:, 0] to avoid this error:
TypeError: Inconsistency in the inner graph of scan 'scan_fn' : an input
and an output are associated with the same recurrent state and should have
the same type but have type 'TensorType(float32, col)' and
'TensorType(float32, matrix)' respectively.
"""
def out_step(shuff_inpt_tm1, vinp_t, pred_fm1, v_h1_tm1):
j_t = concatenate((shuff_inpt_tm1, vinp_t, pred_fm1.dimshuffle(0, 'x')),
axis=-1)
theano.printing.Print("j_t.shape")(j_t.shape)
vinp_h1_t, vgate_h1_t = outs_to_v_h1.proj(j_t)
v_h1_t = v_cell1.step(vinp_h1_t, vgate_h1_t, v_h1_tm1)
theano.printing.Print("v_h1_t.shape")(v_h1_t.shape)
pred_f = v_h1_t.dot(pred_proj) + pred_b
theano.printing.Print("pred_f.shape")(pred_f.shape)
return pred_f[:, 0], v_h1_t
init_pred = tensor.zeros((vinp.shape[1],), dtype=theano.config.floatX)
init_hidden = tensor.zeros((shuff_inpt.shape[1], n_v_proj),
dtype=theano.config.floatX)
theano.printing.Print("init_pred.shape")(init_pred.shape)
theano.printing.Print("init_hidden.shape")(init_hidden.shape)
r, updates = theano.scan(
fn=out_step,
sequences=[shuff_inpt, vinp],
outputs_info=[init_pred, init_hidden])
(pred, v_h1) = r
theano.printing.Print("pred.shape")(pred.shape)
pred = pred.dimshuffle(1, 0, 'x')
shp = pred.shape
theano.printing.Print("pred.shape")(pred.shape)
pred = pred.reshape((minibatch_size, shp[0] // minibatch_size,
shp[1], shp[2]))
theano.printing.Print("pred.shape")(pred.shape)
pred = pred.dimshuffle(1, 0, 2, 3)
theano.printing.Print("pred.shape")(pred.shape)
pred = pred[:, :, :, 0]
theano.printing.Print("pred.shape")(pred.shape)
theano.printing.Print("target.shape")(target.shape)
# binomial
#pred = sigmoid(pre_pred.reshape((shp[0], shp[1], -1)))
#cost = target * tensor.log(pred) + (n_bins - target) * tensor.log(1 - pred)
# MSE
cost = (pred - target) ** 2
cost = cost * mask.dimshuffle(0, 1, 'x')
# sum over sequence length and features, mean over minibatch
cost = cost.dimshuffle(0, 2, 1)
cost = cost.reshape((-1, cost.shape[2]))
cost = cost.sum(axis=0).mean()
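    # Equivalent reduction in plain NumPy (illustrative; shapes assumed to be
    # (seq_len, minibatch, features) at this point):
    #   c = ((pred - target) ** 2) * mask[:, :, None]
    #   cost = c.sum(axis=(0, 2)).mean()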
l2_penalty = 0
for p in list(set(params) - set(biases)):
l2_penalty += (p ** 2).sum()
cost = cost + 1E-3 * l2_penalty
grads = tensor.grad(cost, params)
grads = gradient_clipping(grads, 10.)
learning_rate = 1E-4
opt = adam(params, learning_rate)
updates = opt.updates(params, grads)
if args.cont is not None:
print("Continuing training from saved model")
continue_path = args.cont
if not os.path.exists(continue_path):
raise ValueError("Continue model %s, path not "
"found" % continue_path)
saved_checkpoint = load_checkpoint(continue_path)
checkpoint_dict = saved_checkpoint
train_function = checkpoint_dict["train_function"]
cost_function = checkpoint_dict["cost_function"]
predict_function = checkpoint_dict["predict_function"]
attention_function = checkpoint_dict["attention_function"]
sample_function = checkpoint_dict["sample_function"]
"""
trained_weights = get_values_from_function(
saved_checkpoint["train_function"])
set_shared_variables_in_function(train_function, trained_weights)
"""
else:
train_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w],
[cost, h1, h2, h3, kappa, w],
updates=updates)
cost_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w],
[cost, h1, h2, h3, kappa, w])
predict_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w],
[outs],
on_unused_input='warn')
attention_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w],
[kappa, w], on_unused_input='warn')
sample_function = theano.function([init_x, c_sym, c_mask_sym, init_h1, init_h2,
init_h3, init_kappa, init_w],
[sampled, h1_s, h2_s, h3_s, k_s, w_s,
stop_s, stop_h],
on_unused_input="warn",
updates=supdates)
print("Beginning training loop")
checkpoint_dict = {}
checkpoint_dict["train_function"] = train_function
checkpoint_dict["cost_function"] = cost_function
checkpoint_dict["predict_function"] = predict_function
checkpoint_dict["attention_function"] = attention_function
checkpoint_dict["sample_function"] = sample_function
def _loop(function, itr):
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
prev_kappa = np_zeros((minibatch_size, att_size))
prev_w = np_zeros((minibatch_size, n_chars))
X_mb, X_mb_mask, c_mb, c_mb_mask = next(itr)
n_cuts = len(X_mb) // cut_len + 1
partial_costs = []
for n in range(n_cuts):
start = n * cut_len
stop = (n + 1) * cut_len
if len(X_mb[start:stop]) < cut_len:
new_len = cut_len - len(X_mb) % cut_len
zeros = np.zeros((new_len, X_mb.shape[1],
X_mb.shape[2]))
zeros = zeros.astype(X_mb.dtype)
mask_zeros = np.zeros((new_len, X_mb_mask.shape[1]))
mask_zeros = mask_zeros.astype(X_mb_mask.dtype)
X_mb = np.concatenate((X_mb, zeros), axis=0)
X_mb_mask = np.concatenate((X_mb_mask, mask_zeros), axis=0)
assert len(X_mb[start:stop]) == cut_len
assert len(X_mb_mask[start:stop]) == cut_len
rval = function(X_mb[start:stop],
X_mb_mask[start:stop],
c_mb, c_mb_mask,
prev_h1, prev_h2, prev_h3, prev_kappa, prev_w)
current_cost = rval[0]
prev_h1, prev_h2, prev_h3 = rval[1:4]
prev_h1 = prev_h1[-1]
prev_h2 = prev_h2[-1]
prev_h3 = prev_h3[-1]
prev_kappa = rval[4][-1]
prev_w = rval[5][-1]
partial_costs.append(current_cost)
return partial_costs
run_loop(_loop, train_function, train_itr, cost_function, valid_itr,
n_epochs=n_epochs, checkpoint_dict=checkpoint_dict,
checkpoint_every_n=checkpoint_every_n, skip_minimums=True)
| [
"[email protected]"
]
| |
781a83a87d5fb9e980be34d090ce68cf1aba93a2 | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/buffer_cache/=+home=+dante=+proyectos=+django-1.9=+sermul=+manage.py | dca2c396a7c3a45e15f9cbfa9f80d467b50c38e8 | []
| no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | {'usr', 'bin', 'env', 'python', 'import', 'os', 'sys', 'if', '__name__', '__main__', 'environ', 'setdefault', 'DJANGO_SETTINGS_MODULE', 'sermul', 'settings', 'from', 'django', 'core', 'management', 'execute_from_command_line', 'argv'}
| [
"[email protected]"
]
| |
ac00ac4bb96ebe184493e06849d1d2e99492b860 | 2f96d0e69ce3d6b1ea4623ed5b4c1741d9634ea9 | /tests/dummy_repo/tvm/python/tvm/hybrid/util.py | 556ede1519e92fb2666ef894fd89ca5bfffa2590 | [
"Apache-2.0"
]
| permissive | tqchen/ffi-navigator | ae1e8923e4d5be589beabfadba91f4a3b39e03dd | 46b0d0c6bce388a8e1e2cb7ed28062e889e4596c | refs/heads/main | 2023-02-06T22:32:54.214871 | 2023-02-05T16:25:16 | 2023-02-05T16:25:16 | 230,478,838 | 217 | 24 | Apache-2.0 | 2023-02-05T16:25:18 | 2019-12-27T16:44:58 | Python | UTF-8 | Python | false | false | 921 | py | import ast
import inspect
import logging
import sys
import numpy
from .. import api as _api
from .. import make as _make
from .. import expr as _expr
from .. import stmt as _stmt
from .._ffi.base import numeric_types
from ..tensor import Tensor
from ..container import Array
def replace_io(body, rmap):
"""Replacing tensors usage according to the dict given"""
from .. import ir_pass
def replace(op):
if isinstance(op, _stmt.Provide) and op.func in rmap.keys():
buf = rmap[op.func]
return _make.Provide(buf.op, op.value_index, op.value, op.args)
if isinstance(op, _expr.Call) and op.func in rmap.keys():
buf = rmap[op.func]
return _make.Call(buf.dtype, buf.name, op.args, \
_expr.Call.Halide, buf.op, buf.value_index)
return None
return ir_pass.IRTransform(body, None, replace, ['Provide', 'Call'])
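
# Illustrative use of replace_io (names are hypothetical, not from this repo):
# given a statement body and a mapping {original_tensor: replacement_buffer},
#
#   new_body = replace_io(stage.op.body, {a: a_buf, b: b_buf})
#
# every Provide on a mapped tensor is rewritten to provide into its buffer and
# every Halide Call on a mapped tensor reads from that buffer instead.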
| [
"[email protected]"
]
| |
6f6476757e06d7a487ecf584035e507e47e98cb6 | 9e9d23e7a57c46da27a491a61f19c7239d066bf8 | /biliup/__init__.py | e1ff55cbd324da2fcb10188ba6f6f304a81fa7ea | [
"MIT"
]
| permissive | vmcole/bilibiliupload | f7c667927bfcc4a0c1c5eba96b674729ae776e62 | b5c416451f66c2ebe550694d4c4957129d0e966e | refs/heads/master | 2023-06-09T19:58:33.813073 | 2021-07-06T14:50:18 | 2021-07-06T14:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | import asyncio
from .common.reload import AutoReload
from .common.timer import Timer
from .engine.event import EventManager, Event
from .engine import config, invert_dict, Plugin
from . import plugins
__version__ = "0.0.8"
def create_event_manager():
streamer_url = {k: v['url'] for k, v in config['streamers'].items()}
inverted_index = invert_dict(streamer_url)
urls = list(inverted_index.keys())
pool1_size = config.get('pool1_size') if config.get('pool1_size') else 3
pool2_size = config.get('pool2_size') if config.get('pool2_size') else 3
    # initialize the event manager
app = EventManager(config, pool1_size=pool1_size, pool2_size=pool2_size)
app.context['urls'] = urls
app.context['url_status'] = dict.fromkeys(inverted_index, 0)
app.context['checker'] = Plugin(plugins).sorted_checker(urls)
app.context['inverted_index'] = inverted_index
app.context['streamer_url'] = streamer_url
return app
event_manager = create_event_manager()
async def main():
from .handler import CHECK_UPLOAD, CHECK
event_manager.start()
async def check_timer():
event_manager.send_event(Event(CHECK_UPLOAD))
for k in event_manager.context['checker'].keys():
event_manager.send_event(Event(CHECK, (k,)))
wait = config.get('event_loop_interval') if config.get('event_loop_interval') else 40
    # initialize the timer
timer = Timer(func=check_timer, interval=wait)
interval = config.get('check_sourcecode') if config.get('check_sourcecode') else 15
    # automatically restart when source modules are updated
detector = AutoReload(event_manager, timer, interval=interval)
await asyncio.gather(detector.astart(), timer.astart(), return_exceptions=True)
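
# Illustrative only (not part of the original module): main() is a coroutine,
# so a caller such as the package CLI would drive it with an event loop, e.g.
#   asyncio.run(main())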
| [
"[email protected]"
]
| |
274332a28662cdd27514f4e4d6ea6d2fb35d89f7 | 82db461036ffb2adbf0424a6f0575cd9d24b48a8 | /main.py | aa2b3ceb4b62ba95ae0a6123184a319dd03db241 | []
| no_license | webclinic017/option_pdt | fdc559f02cc529b54278e90e04170713fe93684f | dd302c6b2661e26dbfcbea0384b99e85ae9584e1 | refs/heads/master | 2023-03-24T10:43:35.998775 | 2021-03-19T14:08:38 | 2021-03-19T14:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,828 | py | import sys
import os
parent_path = os.path.dirname(sys.path[0])
if parent_path not in sys.path:
sys.path.append(parent_path)
import json
import pickle
import logging
import pandas as pd
import numpy as np
from datetime import datetime
from library import get_strategy
from utils.util_func import *
from optparse import OptionParser
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser()
parser.add_option('-f', '--file_name', action='store', type='string', default=None)
(opts, args) = parser.parse_args()
file_path = f'library/strategy/{opts.file_name}.json'
strategy_data_file = opts.file_name.split('_')[0]+"_data"
with open(file_path, 'r') as f:
options = json.load(f)
'''
from datetime import datetime
import pandas as pd
import pickle
positions = pd.read_csv("data/positions_s.csv")
positions['group'] = positions['group'].astype(str)
#hedge_positions = pd.read_csv("data/hedge_positions.csv",index_col=0)
#hedge_positions['group'] = hedge_positions['group'].astype(str)
strategy_data = {'hedge_time':datetime.now()}
with open(f'data/delta_data.pkl','wb') as fw:
pickle.dump(strategy_data, fw)
with open(f'data/customer_position.pkl','wb') as fw:
pickle.dump(positions, fw)
today = datetime.now()
cols = ['EXP_DATE','ask_price', 'bid_price', 'creation_timestamp','instrument_name', 'K','S','cp',
'interest_rate','open_interest','underlying_index', 'volume','TTM']
option_df = pd.read_csv("data/option_df.csv",index_col=0)
option_df = option_df[cols]
#option_df['TTM'] = [days_diff(exp_date,today) for exp_date in option_df['EXP_DATE']]
option_df = option_df[option_df['TTM']>0.1]
portfolio = sim_positions(option_df,6)
subscription_list = [symbol2subs(symbol,"%d%b%y") for symbol in portfolio['instrument_name']]
'''
with open(f'data/{strategy_data_file}.pkl','rb') as fw:
strategy_data = pickle.load(fw)
with open(f'data/customer_position.pkl','rb') as fw:
positions = pickle.load(fw)
positions,is_removed = remove_expired_positions(positions)
if is_removed:
with open(f'data/customer_position.pkl','wb') as fw:
pickle.dump(positions, fw)
hedge_time = strategy_data['hedge_time']
#hedge_positions = strategy_data['hedge_positions']
#positions = {key:{k:0 for k,v in values.items()} for key,values in positions.items()}
#subscription_list = [symbol2subs(symbol,"%Y%m%d") for symbol in positions.keys() if symbol!='BTCUSD']
subscription_list = []
subscription_list.append('Deribit|BTCUSD|perp|ticker')
subscription_list.append('Deribit|BTCUSD|option|summaryinfo')
options['subscription_list'] = list(set(subscription_list))
options['hedge_time'] = hedge_time
options['positions'] = positions
if strategy_data_file == "delta_data":
options['account_target'] = float(strategy_data['account_target'])
stratgy = options['file_name']
context = get_strategy(stratgy)
context.logger.info('Start trading..')
context.config_update(**options)
context.pre_start(**options)
context.start()
#instrument = 'Deribit|BTCUSD-20200925-7000-P|option'
#instrument = 'Deribit|BTCUSD|option|summaryinfo'
#instrument = 'Deribit|BTCUSD|perp'
#context.send_order(instrument, 'sell', 0.1200, 0.1, 'Limit')
#context.send_order(instrument, 'sell', 0.1, 0.1, 'Fak', delay=3000)
#context.send_order(instrument, 'sell', 9500.5, 1, 'Limit',note='maker')
#context.send_order(instrument, 'buy', 8100.5, 1, 'Market',note='taker')
#context.inspect_order(instrument,'3887280714')
#context.send_order(instrument,'buy',7084,0.0706,'Limit')
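    # Illustrative invocation (the strategy name is hypothetical):
    #   python main.py -f delta_hedge
    # loads library/strategy/delta_hedge.json and, because the prefix before
    # the first "_" selects the pickle, data/delta_data.pkl.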
| [
"[email protected]"
]
| |
e8d6832b01ddb153bea7721f9728d12768dc77a3 | 3c259a3755fa81dbaa5a33591c4bcedb79c20314 | /config/ssef/ssef_eval_cqg_masked_2015.config | 897b88fb2d2d418c25f44140fa30a4d2702f637a | [
"MIT"
]
| permissive | djgagne/hagelslag | f96bea7395d2d967e1dc84faccf910e01b83157b | 17757de7b55737f65f615e5dccad379604961832 | refs/heads/master | 2023-07-24T20:13:07.659540 | 2023-07-13T17:02:00 | 2023-07-13T17:02:00 | 37,555,335 | 64 | 26 | MIT | 2023-07-13T17:02:01 | 2015-06-16T20:48:43 | Jupyter Notebook | UTF-8 | Python | false | false | 3,621 | config | #!/usr/bin/env python
from datetime import datetime
import numpy as np
ensemble_members = ["wrf-s3cn_arw"] + ["wrf-s3m{0:d}_arw".format(m) for m in range(3, 14)]
scratch_path = "/sharp/djgagne/"
experiment_name = "cqg_masked"
config = dict(ensemble_name="SSEF",
ensemble_members=ensemble_members,
start_date=datetime(2015, 5, 12),
end_date=datetime(2015, 6, 5),
start_hour=13,
end_hour=36,
window_sizes=[1, 3, 24],
time_skip=1,
model_names=dict(dist=["Random Forest", "Elastic Net", "Random Forest CV"],
condition=["Random Forest"]),
model_types=["dist", "condition"],
size_thresholds=[5, 25, 50],
condition_threshold=0.5,
dist_thresholds=np.arange(0, 200),
num_max_samples=1000,
forecast_json_path=scratch_path + "track_forecasts_spring2015_{0}_json/".format(experiment_name),
track_data_csv_path=scratch_path + "track_data_spring2015_{0}_csv/".format(experiment_name),
forecast_sample_path=scratch_path + "track_samples_spring2015_{0}/".format(experiment_name),
mrms_path=scratch_path + "mrms_spring2015/",
mrms_variable="MESH_Max_60min_00.50",
obs_mask=True,
mask_variable="RadarQualityIndex_00.00",
forecast_thresholds=np.concatenate(([0, 0.01, 0.02], np.arange(0.05, 1.1, 0.05))),
dilation_radius=13,
forecast_bins={"dist": np.array(["Shape_f", "Location_f", "Scale_f"]),
"condition": np.array(["ProbHail"]),
"translation-x":np.arange(-240000, 264000, 24000),
"translation-y":np.arange(-240000, 264000, 24000),
"start-time":np.arange(-6, 7, 1)
},
object_thresholds=[0, 25, 50],
out_path=scratch_path + "evaluation_data_spring2015_{0}/".format(experiment_name),
obj_scores_file="object_scores_ssef_2015_cqg_closest_",
grid_scores_file="grid_scores_ssef_2015_cqg_cloest.csv",
obs_thresholds=[5, 25, 50, 75],
ensemble_variables=["uh_max", "hailsz", "cqgmax", "r10cmx"],
neighbor_thresholds={"dist": [25, 50],
"uh_max": [25, 75, 150],
"hailsz": [5, 25, 50],
"cqgmax": [5, 25, 50],
"r10cmx": [40, 60]},
neighbor_path="/sharp/djgagne/hail_consensus_ssef_{0}_2015/".format(experiment_name),
neighbor_score_path="/sharp/djgagne/neighbor_scores_ssef_unique_2015/ssef_{0}_diss_".format(experiment_name),
neighbor_radii=[14, 28],
smoothing_radii=[14, 21, 28],
neighbor_radius=42,
neighbor_sigma=1,
ml_grid_path=scratch_path + "hail_forecasts_grib2_ssef_cqg_masked_2015/",
coarse_neighbor_out_path= scratch_path + "ssef_coarse_neighbor_eval_2015/",
map_file = "/home/djgagne/hagelslag/mapfiles/ssef2015.map",
us_mask_file="/home/djgagne/hagelslag/mapfiles/ssef_2015_us_mask.nc",
coordinate_file="/sharp/djgagne/ssef_2015_grid.nc",
lon_bounds=[-106,-80],
lat_bounds=[28,48],
stride=14,
ensemble_path=scratch_path + "spring2015_nc/",
single_step=False,
)
| [
"[email protected]"
]
| |
12712fe4e23a5c73bf59f892cdc1ef0041cd1ab4 | 5410700e83210d003f1ffbdb75499062008df0d6 | /leetcode/isHappy.py | 92bdf82a57b5d864724396b17b24897d123370fd | []
| no_license | lilyandcy/python3 | 81182c35ab8b61fb86f67f7796e057936adf3ab7 | 11ef4ace7aa1f875491163d036935dd76d8b89e0 | refs/heads/master | 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | class Solution:
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
looplist = []
num = n
while num != 1:
if num not in looplist:
looplist.append(num)
else:
return False
num = self.sumLocation(num)
return True
def sumLocation(self, num):
strnum = str(num)
sumnum = 0
for i in range(len(strnum)):
sumnum += int(strnum[i]) ** 2
return sumnum | [
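

if __name__ == "__main__":
    # Quick illustrative check, not part of the original submission:
    # 19 -> 82 -> 68 -> 100 -> 1, so 19 is happy; 2 falls into the 4 -> 16 -> ... cycle.
    s = Solution()
    assert s.isHappy(19) is True
    assert s.isHappy(2) is False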
"[email protected]"
]
| |
a0602524e8bd8ee7ffd9da50880916d0a4c0a3da | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/timers/spf/state/__init__.py | baee691cec8aaf70f22e9f69662878b17b753e95 | [
"Apache-2.0"
]
| permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 31,796 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/spf/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines state information for ISIS SPF timers.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__spf_hold_interval','__spf_first_interval','__spf_second_interval','__adaptive_timer',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'timers', u'spf', u'state']
def _get_spf_hold_interval(self):
"""
Getter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
YANG Description: SPF Hold Down time interval in milliseconds.
"""
return self.__spf_hold_interval
def _set_spf_hold_interval(self, v, load=False):
"""
Setter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_hold_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_hold_interval() directly.
YANG Description: SPF Hold Down time interval in milliseconds.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_hold_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_hold_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_hold_interval(self):
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_first_interval(self):
"""
Getter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
return self.__spf_first_interval
def _set_spf_first_interval(self, v, load=False):
"""
Setter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_first_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_first_interval() directly.
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_first_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_first_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_first_interval(self):
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_second_interval(self):
"""
Getter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
return self.__spf_second_interval
def _set_spf_second_interval(self, v, load=False):
"""
Setter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_second_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_second_interval() directly.
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_second_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_second_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_second_interval(self):
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_adaptive_timer(self):
"""
Getter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
return self.__adaptive_timer
def _set_adaptive_timer(self, v, load=False):
"""
Setter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adaptive_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adaptive_timer() directly.
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adaptive_timer must be of a type compatible with oc-isis-types:adaptive-timer-type""",
'defined-type': "oc-isis-types:adaptive-timer-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)""",
})
self.__adaptive_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_adaptive_timer(self):
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
spf_hold_interval = __builtin__.property(_get_spf_hold_interval)
spf_first_interval = __builtin__.property(_get_spf_first_interval)
spf_second_interval = __builtin__.property(_get_spf_second_interval)
adaptive_timer = __builtin__.property(_get_adaptive_timer)
_pyangbind_elements = {'spf_hold_interval': spf_hold_interval, 'spf_first_interval': spf_first_interval, 'spf_second_interval': spf_second_interval, 'adaptive_timer': adaptive_timer, }
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/spf/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines state information for ISIS SPF timers.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__spf_hold_interval','__spf_first_interval','__spf_second_interval','__adaptive_timer',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'global', u'timers', u'spf', u'state']
def _get_spf_hold_interval(self):
"""
Getter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
YANG Description: SPF Hold Down time interval in milliseconds.
"""
return self.__spf_hold_interval
def _set_spf_hold_interval(self, v, load=False):
"""
Setter method for spf_hold_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_hold_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_hold_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_hold_interval() directly.
YANG Description: SPF Hold Down time interval in milliseconds.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_hold_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_hold_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_hold_interval(self):
self.__spf_hold_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64)(5000), is_leaf=True, yang_name="spf-hold-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_first_interval(self):
"""
Getter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
return self.__spf_first_interval
def _set_spf_first_interval(self, v, load=False):
"""
Setter method for spf_first_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_first_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_first_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_first_interval() directly.
YANG Description: Time interval in milliseconds between the
detection of topology change and when the SPF algorithm runs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_first_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_first_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_first_interval(self):
self.__spf_first_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-first-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_spf_second_interval(self):
"""
Getter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
return self.__spf_second_interval
def _set_spf_second_interval(self, v, load=False):
"""
Setter method for spf_second_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/spf_second_interval (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_spf_second_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_spf_second_interval() directly.
YANG Description: Time interval in milliseconds between the first and second
SPF calculation.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """spf_second_interval must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
})
self.__spf_second_interval = t
if hasattr(self, '_set'):
self._set()
def _unset_spf_second_interval(self):
self.__spf_second_interval = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="spf-second-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)
def _get_adaptive_timer(self):
"""
Getter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
return self.__adaptive_timer
def _set_adaptive_timer(self, v, load=False):
"""
Setter method for adaptive_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/spf/state/adaptive_timer (oc-isis-types:adaptive-timer-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_adaptive_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_adaptive_timer() directly.
YANG Description: ISIS adaptive timer types (linear, exponential).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """adaptive_timer must be of a type compatible with oc-isis-types:adaptive-timer-type""",
'defined-type': "oc-isis-types:adaptive-timer-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)""",
})
self.__adaptive_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_adaptive_timer(self):
self.__adaptive_timer = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'LINEAR': {}, u'EXPONENTIAL': {}},), is_leaf=True, yang_name="adaptive-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:adaptive-timer-type', is_config=False)
spf_hold_interval = __builtin__.property(_get_spf_hold_interval)
spf_first_interval = __builtin__.property(_get_spf_first_interval)
spf_second_interval = __builtin__.property(_get_spf_second_interval)
adaptive_timer = __builtin__.property(_get_adaptive_timer)
_pyangbind_elements = {'spf_hold_interval': spf_hold_interval, 'spf_first_interval': spf_first_interval, 'spf_second_interval': spf_second_interval, 'adaptive_timer': adaptive_timer, }
| [
"[email protected]"
]
| |
994523ad13eaf886d1e9b898c2b4e1e3021ae3a6 | fac37d77a8d00e3d13106bcd728d51a455dd16f2 | /kmer.py | 2c016a97eb7bf7903ce31d36c4622ef1926e080c | []
| no_license | anu-bioinfo/rosalind-4 | c6a628bba94f647cf4a34bdf505f1527af4346a9 | 3ddc659d44298f4dd4b5dde66d7833b4d27a2580 | refs/heads/master | 2020-03-25T13:47:39.521215 | 2014-09-14T02:30:54 | 2014-09-14T02:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/env python
from __future__ import print_function
import os
from revp import read_fasta
from subs import substring_find
from lexf import lexf_order
def kmer_composition(dna_string):
output = []
for p in lexf_order(4, 'ACGT'):
pos = list(substring_find(dna_string, ''.join(p)))
output.append(str(len(pos)))
return output
if __name__ == "__main__":
with open(os.path.join('data', 'rosalind_kmer.txt')) as dataset:
seqs = read_fasta(dataset)
dna_string = seqs.popitem(last=False)[1]
print(*kmer_composition(dna_string))
| [
"[email protected]"
]
| |
3f532246345c6898340e9b5f2125626a978ca0cf | fed6c6bdb6276d195bc565e527c3f19369d22b74 | /galaxy-galaxy lensing/prepare_cata/Fourier_Quad_cata/gather_raw_cata.py | 4e38e9d277633610cb84172ab6665238c0c69d4e | []
| no_license | hekunlie/astrophy-research | edbe12d8dde83e0896e982f08b463fdcd3279bab | 7b2b7ada7e7421585e8993192f6111282c9cbb38 | refs/heads/master | 2021-11-15T05:08:51.271669 | 2021-11-13T08:53:33 | 2021-11-13T08:53:33 | 85,927,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | import matplotlib
matplotlib.use("Agg")
import os
my_home = os.popen("echo $MYWORK_DIR").readlines()[0][:-1]
from sys import path
path.append('%s/work/mylib/'%my_home)
import tool_box
import h5py
from mpi4py import MPI
import numpy
import time
from subprocess import Popen
import warnings
warnings.filterwarnings('error')
# The new Fourier_Quad catalog differs from the old version!!!
# collect: collect the data from the files of each field. It creates the "fourier_cata.hdf5" in
#          the parent directory of the one containing the field catalog.
#          If the catalog file doesn't exist, run this step first!!!
#          It will add the redshift parameters from the CFHT catalog into the final catalog.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
cpus = comm.Get_size()
data_path = "/mnt/perc/hklee/CFHT/catalog/fourier_cata_new/"
raw_cata_path = data_path + "raw_cata_new/"
dicts, fields = tool_box.field_dict(data_path + "nname.dat")
my_field = tool_box.allot(fields, cpus)[rank]
chip_num = 36
for field_nm in my_field:
field_path = raw_cata_path + "%s/"%field_nm
files = os.listdir(field_path)
chip_exps = []
for nm in files:
if ".dat" in nm:
exp_nm = nm.split("p")[0]
if exp_nm not in chip_exps:
chip_exps.append(exp_nm)
chip_exps.sort()
file_count = 0
for exp_nm in chip_exps:
for i in range(1,chip_num+1):
chip_nm = "%sp_%d_shear.dat"%(exp_nm, i)
chip_path = field_path + chip_nm
if os.path.exists(chip_path):
try:
temp = numpy.loadtxt(chip_path, skiprows=1)
if file_count == 0:
data = temp
else:
data = numpy.row_stack((data, temp))
file_count += 1
except:
file_size = os.path.getsize(chip_path)/1024.
print("Empty: %s (%.3f KB)"%(chip_nm, file_size))
else:
print("Can't find %d"%chip_nm)
if file_count > 0:
final_path = data_path + "%s/%s_shear_raw.cat"%(field_nm, field_nm)
numpy.savetxt(final_path, data)
h5f = h5py.File(final_path,"w")
h5f["/data"] = data
h5f.close()
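
# Descriptive note on the I/O above (inferred from the code, nothing executed):
#   input : raw_cata_new/<field>/<exposure>p_<chip>_shear.dat for chips 1..36
#   output: <field>/<field>_shear_raw.cat, written first as plain text via
#           numpy.savetxt and then overwritten as an HDF5 file whose "/data"
#           dataset holds the stacked array.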
| [
"[email protected]"
]
| |
038be3106c05dcfa1cf28d115152639a38956939 | aa2645c96047d775061e0443299c64fc5b255027 | /0405 if1.py | a77e5980ffceb18e44a2854875622938e9a1089f | []
| no_license | sunnyhyo/Problem-Solving-and-SW-programming | ca63b705b27ebb49d32a0a6591211250f213d019 | 8689b9728c028a870dfba7a4d16601a248c7e792 | refs/heads/master | 2021-03-30T21:07:27.276272 | 2018-06-14T15:27:22 | 2018-06-14T15:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py |
#조건문 1/4
score=input("점수입력")
score=int(score)
if score > 90:
print("합격!!!")
print("장학금도 받을 수 있음")
| [
"[email protected]"
]
| |
3ee2e5b8231c35ed791296508519b38ff68c5c91 | 063775cfd2642614feb1d340a19740d3be3b4239 | /rich/console.py | 774f1bcf16f2c27f2bd2a00e20e9bc16f4c9ddd5 | [
"MIT"
]
| permissive | adamchainz/rich | 7e0a328a6a5d0673255aa7f364d22e802a51b3e3 | 7b00f0ecb15a4698931d49922a665a6f02782e29 | refs/heads/master | 2023-08-18T13:40:07.405137 | 2020-01-26T17:24:55 | 2020-01-26T17:24:55 | 236,697,550 | 0 | 0 | MIT | 2020-01-28T09:18:29 | 2020-01-28T09:18:28 | null | UTF-8 | Python | false | false | 32,567 | py | from collections import ChainMap
from collections.abc import Mapping, Sequence
from contextlib import contextmanager
from dataclasses import dataclass, replace
from enum import Enum
import inspect
from itertools import chain
import os
from operator import itemgetter
import re
import shutil
import sys
from typing import (
Any,
Callable,
Dict,
IO,
Iterable,
List,
Optional,
NamedTuple,
overload,
Tuple,
TYPE_CHECKING,
Union,
)
from typing_extensions import Protocol, runtime_checkable, Literal
from ._emoji_replace import _emoji_replace
from . import markup
from .render_width import RenderWidth
from ._log_render import LogRender
from .default_styles import DEFAULT_STYLES
from . import errors
from .color import ColorSystem
from .highlighter import NullHighlighter, ReprHighlighter
from .pretty import Pretty
from .style import Style
from .tabulate import tabulate_mapping
from . import highlighter
from . import themes
from .pretty import Pretty
from .theme import Theme
from .segment import Segment
if TYPE_CHECKING: # pragma: no cover
from .text import Text
HighlighterType = Callable[[Union[str, "Text"]], "Text"]
JustifyValues = Optional[Literal["left", "center", "right", "full"]]
CONSOLE_HTML_FORMAT = """\
<!DOCTYPE html>
<head>
<style>
{stylesheet}
body {{
color: {foreground};
background-color: {background};
}}
</style>
</head>
<html>
<body>
<code>
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
</code>
</body>
</html>
"""
@dataclass
class ConsoleOptions:
"""Options for __console__ method."""
min_width: int
max_width: int
is_terminal: bool
encoding: str
justify: Optional[JustifyValues] = None
def update(
self,
width: int = None,
min_width: int = None,
max_width: int = None,
justify: JustifyValues = None,
):
"""Update values, return a copy."""
options = replace(self)
if width is not None:
options.min_width = options.max_width = width
if min_width is not None:
options.min_width = min_width
if max_width is not None:
options.max_width = max_width
if justify is not None:
options.justify = justify
return options
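    # Example (illustrative): render a child element at a fixed width and
    # centered, regardless of the parent console's real width:
    #   child_options = options.update(width=40, justify="center")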
@runtime_checkable
class ConsoleRenderable(Protocol):
"""An object that supports the console protocol."""
def __console__(
self, console: "Console", options: "ConsoleOptions"
) -> Iterable[Union["ConsoleRenderable", Segment]]: # pragma: no cover
...
RenderableType = Union[ConsoleRenderable, Segment, str]
RenderResult = Iterable[Union[ConsoleRenderable, Segment]]
_null_highlighter = NullHighlighter()
class ConsoleDimensions(NamedTuple):
"""Size of the terminal."""
width: int
height: int
class StyleContext:
"""A context manager to manage a style."""
def __init__(self, console: "Console", style: Optional[Style]):
self.console = console
self.style = style
def __enter__(self) -> "Console":
if self.style is not None:
self.console.push_style(self.style)
self.console._enter_buffer()
return self.console
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.console._exit_buffer()
if self.style is not None:
self.console.pop_style()
COLOR_SYSTEMS = {
"standard": ColorSystem.STANDARD,
"256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR,
}
_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
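
# Illustrative usage of the class below (method names are the ones referenced
# in its docstrings; behaviour sketched, not guaranteed):
#   console = Console(record=True, width=80)
#   console.log("server started")     # rendered with time and caller path
#   html = console.export_html()      # only available because record=True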
class Console:
"""A high level console interface.
Args:
color_system (str, optional): The color system supported by your terminal,
either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
styles (Dict[str, Style], optional): An optional mapping of style name strings to :class:`~rich.style.Style` objects.
        file (IO, optional): A file object where the console should write to. Defaults to stdout.
width (int, optional): The width of the terminal. Leave as default to auto-detect width.
height (int, optional): The height of the terminal. Leave as default to auto-detect height.
record (bool, optional): Boolean to enable recording of terminal output,
required to call :meth:`export_html` and :meth:`export_text`. Defaults to False.
markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
log_time_format (str, optional): Log time format if ``log_time`` is enabled. Defaults to "[%X] ".
        highlighter (HighlighterType, optional): Default highlighter.
"""
def __init__(
self,
color_system: Optional[
Literal["auto", "standard", "256", "truecolor"]
] = "auto",
styles: Dict[str, Style] = None,
file: IO = None,
width: int = None,
height: int = None,
record: bool = False,
markup: bool = True,
log_time: bool = True,
log_path: bool = True,
log_time_format: str = "[%X] ",
highlighter: Optional["HighlighterType"] = ReprHighlighter(),
):
self._styles = ChainMap(DEFAULT_STYLES if styles is None else styles)
self.file = file or sys.stdout
self._width = width
self._height = height
self.record = record
self._markup = markup
if color_system is None:
self._color_system = None
elif color_system == "auto":
self._color_system = self._detect_color_system()
else:
self._color_system = COLOR_SYSTEMS[color_system]
self.buffer: List[Segment] = []
self._buffer_index = 0
self._record_buffer: List[Segment] = []
default_style = Style()
self.style_stack: List[Style] = [default_style]
self.current_style = default_style
self._log_render = LogRender(
show_time=log_time, show_path=log_path, time_format=log_time_format
)
self.highlighter: HighlighterType = highlighter or _null_highlighter
def __repr__(self) -> str:
return f"<console width={self.width} {str(self._color_system)}>"
    def _detect_color_system(self) -> Optional[ColorSystem]:
"""Detect color system from env vars."""
if not self.is_terminal:
return None
if os.environ.get("COLORTERM", "").strip().lower() == "truecolor":
return ColorSystem.TRUECOLOR
# 256 can be considered standard nowadays
return ColorSystem.EIGHT_BIT
def _enter_buffer(self) -> None:
"""Enter in to a buffer context, and buffer all output."""
self._buffer_index += 1
def _exit_buffer(self) -> None:
"""Leave buffer context, and render content if required."""
self._buffer_index -= 1
self._check_buffer()
def __enter__(self) -> "Console":
"""Own context manager to enter buffer context."""
self._enter_buffer()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""Exit buffer context."""
self._exit_buffer()
def push_styles(self, styles: Dict[str, Style]) -> None:
"""Merge set of styles with currently active styles.
Args:
styles (Dict[str, Style]): A mapping of style name to Style instance.
"""
self._styles.maps.append(styles)
@property
def color_system(self) -> Optional[str]:
"""Get color system string.
Returns:
Optional[str]: "standard", "256" or "truecolor".
"""
if self._color_system is not None:
return _COLOR_SYSTEMS_NAMES[self._color_system]
else:
return None
@property
def encoding(self) -> str:
"""Get the encoding of the console file, e.g. ``"utf-8"``.
Returns:
str: A standard encoding string.
"""
return getattr(self.file, "encoding", "utf-8")
@property
def is_terminal(self) -> bool:
"""Check if the console is writing to a terminal.
Returns:
            bool: True if the console is writing to a device capable of
            understanding terminal codes, otherwise False.
"""
isatty = getattr(self.file, "isatty", None)
return False if isatty is None else isatty()
@property
def options(self) -> ConsoleOptions:
"""Get default console options."""
return ConsoleOptions(
min_width=1,
max_width=self.width,
encoding=self.encoding,
is_terminal=self.is_terminal,
)
@property
def size(self) -> ConsoleDimensions:
"""Get the size of the console.
Returns:
ConsoleDimensions: A named tuple containing the dimensions.
"""
if self._width is not None and self._height is not None:
return ConsoleDimensions(self._width, self._height)
width, height = shutil.get_terminal_size()
return ConsoleDimensions(
width if self._width is None else self._width,
height if self._height is None else self._height,
)
@property
def width(self) -> int:
"""Get the width of the console.
Returns:
int: The width (in characters) of the console.
"""
width, _ = self.size
return width
def line(self, count: int = 1) -> None:
"""Write new line(s).
Args:
count (int, optional): Number of new lines. Defaults to 1.
"""
assert count >= 0, "count must be >= 0"
if count:
self.buffer.append(Segment("\n" * count))
self._check_buffer()
def _render(
self, renderable: RenderableType, options: Optional[ConsoleOptions]
) -> Iterable[Segment]:
"""Render an object in to an iterable of `Segment` instances.
This method contains the logic for rendering objects with the console protocol.
You are unlikely to need to use it directly, unless you are extending the library.
Args:
renderable (RenderableType): An object supporting the console protocol, or
an object that may be converted to a string.
            options (ConsoleOptions, optional): An options object. Defaults to None.
Returns:
Iterable[Segment]: An iterable of segments that may be rendered.
"""
render_iterable: Iterable[RenderableType]
render_options = options or self.options
if isinstance(renderable, Segment):
yield renderable
return
elif isinstance(renderable, ConsoleRenderable):
render_iterable = renderable.__console__(self, render_options)
elif isinstance(renderable, str):
from .text import Text
yield from self._render(Text(renderable), render_options)
return
else:
raise errors.NotRenderableError(
f"Unable to render {renderable!r}; "
"A str, Segment or object with __console__ method is required"
)
for render_output in render_iterable:
if isinstance(render_output, Segment):
yield render_output
else:
yield from self.render(render_output, render_options)
def render(
self, renderable: RenderableType, options: Optional[ConsoleOptions]
) -> Iterable[Segment]:
"""Render an object in to an iterable of `Segment` instances.
This method contains the logic for rendering objects with the console protocol.
You are unlikely to need to use it directly, unless you are extending the library.
Args:
renderable (RenderableType): An object supporting the console protocol, or
an object that may be converted to a string.
            options (ConsoleOptions, optional): An options object. Defaults to None.
Returns:
Iterable[Segment]: An iterable of segments that may be rendered.
"""
yield from Segment.apply_style(
self._render(renderable, options), self.current_style
)
def render_all(
self, renderables: Iterable[RenderableType], options: Optional[ConsoleOptions]
) -> Iterable[Segment]:
"""Render a number of console objects.
Args:
renderables (Iterable[RenderableType]): Console objects.
options (Optional[ConsoleOptions]): Options for render.
Returns:
Iterable[Segment]: Segments to be written to the console.
"""
render_options = options or self.options
for renderable in renderables:
yield from self.render(renderable, render_options)
def render_lines(
self,
renderable: RenderableType,
options: Optional[ConsoleOptions],
style: Optional[Style] = None,
) -> List[List[Segment]]:
"""Render objects in to a list of lines.
The output of render_lines is useful when further formatting of rendered console text
is required, such as the Panel class which draws a border around any renderable object.
Args:
            renderable (RenderableType): Any object renderable in the console.
            options (Optional[ConsoleOptions]): Console options used to render with.
            style (Optional[Style], optional): Optional style to apply to the rendered lines. Defaults to None.
Returns:
List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
"""
render_options = options or self.options
with self.style(style or "none"):
_rendered = self.render(renderable, render_options)
lines = list(
Segment.split_and_crop_lines(
_rendered, render_options.max_width, style=style
)
)
return lines
def render_str(self, text: str) -> "Text":
"""Convert a string to a Text instance.
Args:
text (str): Text to render.
Returns:
ConsoleRenderable: Renderable object.
"""
if self._markup:
return markup.render(text)
return markup.render_text(text)
def _get_style(self, name: str) -> Optional[Style]:
"""Get a named style, or `None` if it doesn't exist.
Args:
name (str): The name of a style.
Returns:
Optional[Style]: A Style object for the given name, or `None`.
"""
return self._styles.get(name, None)
def get_style(
self, name: Union[str, Style], *, default: Union[Style, str] = None
) -> Style:
"""Get a style merged with the current style.
Args:
name (str): The name of a style or a style definition.
Returns:
Style: A Style object.
Raises:
MissingStyle: If no style could be parsed from name.
"""
if isinstance(name, Style):
return name
try:
return self._styles.get(name) or Style.parse(name)
except errors.StyleSyntaxError as error:
if default is not None:
return self.get_style(default)
if " " in name:
raise
raise errors.MissingStyle(f"No style named {name!r}; {error}")
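    # Usage sketch (added comment, not in the original source): ``get_style`` first looks up
    # a named style, then falls back to parsing the string as a style definition.
    #
    #     console.get_style("bold red")                      # parsed style definition
    #     console.get_style("not-a-style", default="none")   # falls back to the default
    #
    # A single-word name that cannot be parsed raises ``errors.MissingStyle`` when no
    # default is given; a multi-word definition re-raises the ``StyleSyntaxError``.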
def push_style(self, style: Union[str, Style]) -> None:
"""Push a style on to the stack.
The new style will be applied to all `write` calls, until
`pop_style` is called.
Args:
style (Union[str, Style]): New style to merge with current style.
"""
if isinstance(style, str):
style = self.get_style(style)
self.current_style = self.current_style + style
self.style_stack.append(self.current_style)
def pop_style(self) -> Style:
"""Pop a style from the stack.
This will revert to the style applied prior to the corresponding `push_style`.
Returns:
Style: The previously applied style.
"""
if len(self.style_stack) == 1:
raise errors.StyleStackError(
"Can't pop the default style (check there is `push_style` for every `pop_style`)"
)
style = self.style_stack.pop()
self.current_style = self.style_stack[-1]
return style
def style(self, style: Optional[Union[str, Style]]) -> StyleContext:
"""A context manager to apply a new style.
Example:
with context.style("bold red"):
context.print("Danger Will Robinson!")
Args:
style (Union[str, Style]): New style to apply.
Returns:
StyleContext: A style context manager.
"""
if style is None:
return StyleContext(self, None)
if isinstance(style, str):
_style = self.get_style(style)
else:
if not isinstance(style, Style):
raise TypeError(f"style must be a str or Style instance, not {style!r}")
_style = style
return StyleContext(self, _style)
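    # Note (added comment): the ``style`` context manager above is roughly equivalent to a
    # balanced ``push_style``/``pop_style`` pair, except that it also buffers output until exit.
    #
    #     console.push_style("bold red")
    #     console.print("Danger Will Robinson!")
    #     console.pop_style()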
def _collect_renderables(
self,
objects: Iterable[Any],
sep: str,
end: str,
emoji=True,
highlight: bool = True,
) -> List[ConsoleRenderable]:
"""Combined a number of renderables and text in to one renderable.
Args:
renderables (Iterable[Union[str, ConsoleRenderable]]): [description]
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
emoji (bool): If True, emoji codes will be replaced, otherwise emoji codes will be left in.
highlight (bool, optional): Perform highlighting. Defaults to True.
Returns:
List[ConsoleRenderable]: A list of things to render.
"""
from .text import Text
sep_text = Text(sep)
end_text = Text(end)
renderables: List[ConsoleRenderable] = []
append = renderables.append
text: List[Text] = []
append_text = text.append
_highlighter: HighlighterType
if highlight:
_highlighter = self.highlighter
else:
_highlighter = _null_highlighter
def check_text() -> None:
if text:
if end:
append_text(end_text)
append(sep_text.join(text))
del text[:]
for renderable in objects:
if isinstance(renderable, ConsoleRenderable):
check_text()
append(renderable)
continue
console_str_callable = getattr(renderable, "__console_str__", None)
if console_str_callable is not None:
append_text(console_str_callable())
continue
if isinstance(renderable, str):
render_str = renderable
if emoji:
render_str = _emoji_replace(render_str)
render_text = self.render_str(render_str)
append_text(_highlighter(render_text))
elif isinstance(renderable, Text):
append_text(renderable)
elif isinstance(renderable, (int, float, bool, bytes, type(None))):
append_text(_highlighter(repr(renderable)))
elif isinstance(renderable, (Mapping, Sequence)):
check_text()
append(Pretty(renderable, highlighter=_highlighter))
else:
append_text(_highlighter(repr(renderable)))
check_text()
return renderables
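    # Dispatch summary (added comment, not original code): ConsoleRenderable objects pass
    # through unchanged; plain strings get emoji replacement, markup rendering and
    # highlighting; Text instances are appended as-is; numbers, bools, bytes and None are
    # highlighted reprs; mappings and sequences are wrapped in ``Pretty``; accumulated text
    # fragments are joined with ``sep`` and terminated with ``end``.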
def rule(self, title: str = "", character: str = "─") -> None:
"""Draw a line with optional centered title.
Args:
title (str, optional): Text to render over the rule. Defaults to "".
character (str, optional): Character to form the line. Defaults to "─".
"""
from .text import Text
width = self.width
if not title:
self.print(Text(character * width, "rule.line"))
else:
title_text = Text.from_markup(title, "rule.text")
if len(title_text) > width - 4:
title_text.set_length(width - 4)
rule_text = Text()
center = (width - len(title_text)) // 2
rule_text.append(character * (center - 1) + " ", "rule.line")
rule_text.append(title_text)
rule_text.append(
" " + character * (width - len(rule_text) - 1), "rule.line"
)
self.print(rule_text)
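    # Layout note (added comment): for ``width=20`` and a 4-character title the centre offset
    # is ``(20 - 4) // 2 == 8``; the rule renders 7 line characters plus a space, the title,
    # then a space and line characters padding the row out to the full console width.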
def print(
self,
*objects: Any,
sep=" ",
end="\n",
style: Union[str, Style] = None,
emoji=True,
highlight: bool = True,
) -> None:
r"""Print to the console.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
style (Union[str, Style], optional): A style to apply to output. Defaults to None.
emoji (bool): If True, emoji codes will be replaced, otherwise emoji codes will be left in.
highlight (bool, optional): Perform highlighting. Defaults to True.
"""
if not objects:
self.line()
return
renderables = self._collect_renderables(
objects, sep=sep, end=end, emoji=emoji, highlight=highlight,
)
render_options = self.options
extend = self.buffer.extend
render = self.render
with self.style(style):
for renderable in renderables:
extend(render(renderable, render_options))
def log(
self,
*objects: Any,
sep=" ",
end="\n",
highlight: bool = True,
log_locals: bool = False,
_stack_offset=1,
) -> None:
r"""Log rich content to the terminal.
Args:
objects (positional args): Objects to log to the terminal.
sep (str, optional): String to write between print data. Defaults to " ".
end (str, optional): String to write at end of print data. Defaults to "\n".
highlight (bool, optional): Perform highlighting. Defaults to True.
log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
was called. Defaults to False.
_stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
"""
if not objects:
self.line()
return
renderables = self._collect_renderables(
objects, sep=sep, end=end, highlight=highlight
)
caller = inspect.stack()[_stack_offset]
path = caller.filename.rpartition(os.sep)[-1]
line_no = caller.lineno
if log_locals:
locals_map = {
key: value
for key, value in caller.frame.f_locals.items()
if not key.startswith("__")
}
renderables.append(tabulate_mapping(locals_map, title="Locals"))
with self:
self.buffer.extend(
self.render(
self._log_render(self, renderables, path=path, line_no=line_no),
self.options,
)
)
def _check_buffer(self) -> None:
"""Check if the buffer may be rendered."""
if self._buffer_index == 0:
text = self._render_buffer()
self.file.write(text)
def _render_buffer(self) -> str:
"""Render buffered output, and clear buffer."""
output: List[str] = []
append = output.append
color_system = self._color_system
buffer = self.buffer[:]
if self.record:
self._record_buffer.extend(buffer)
del self.buffer[:]
for line in Segment.split_and_crop_lines(buffer, self.width):
for text, style in line:
if style:
append(style.render(text, color_system=color_system, reset=True))
else:
append(text)
append("\n")
rendered = "".join(output)
return rendered
def export_text(self, clear: bool = True, styles: bool = False) -> str:
"""Generate text from console contents (requires record=True argument in constructor).
Args:
clear (bool, optional): Set to ``True`` to clear the record buffer after exporting.
styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
Defaults to ``False``.
Returns:
str: String containing console contents.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
if styles:
text = "".join(
(style.render(text, reset=True) if style else text)
for text, style in self._record_buffer
)
else:
text = "".join(text for text, _ in self._record_buffer)
if clear:
del self._record_buffer[:]
return text
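    # Usage sketch (added comment, not in the original source); requires ``record=True``:
    #
    #     console = Console(record=True)
    #     console.print("Hello, [bold]World[/bold]!")
    #     ansi = console.export_text(clear=False, styles=True)  # keeps the record buffer
    #     plain = console.export_text()                         # plain text; buffer cleared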
def save_text(self, path: str, clear: bool = True, styles: bool = False) -> None:
"""Generate text from console and save to a given location (requires record=True argument in constructor).
Args:
path (str): Path to write text files.
clear (bool, optional): Set to ``True`` to clear the record buffer after exporting.
styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
Defaults to ``False``.
"""
text = self.export_text(clear=clear, styles=styles)
with open(path, "wt") as write_file:
write_file.write(text)
def export_html(
self,
theme: Theme = None,
clear: bool = True,
code_format: str = None,
inline_styles: bool = False,
) -> str:
"""Generate HTML from console contents (requires record=True argument in constructor).
Args:
theme (Theme, optional): Theme object containing console colors.
clear (bool, optional): Set to ``True`` to clear the record buffer after generating the HTML.
code_format (str, optional): Format string to render HTML, should contain {foreground}
{background} and {code}.
            inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
Returns:
str: String containing console contents as HTML.
"""
assert (
self.record
), "To export console contents set record=True in the constructor or instance"
fragments: List[str] = []
append = fragments.append
_theme = theme or themes.DEFAULT
stylesheet = ""
def escape(text: str) -> str:
"""Escape html."""
            return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
if inline_styles:
for text, style in Segment.simplify(self._record_buffer):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
append(f'<span style="{rule}">{text}</span>' if rule else text)
else:
append(text)
else:
styles: Dict[str, int] = {}
for text, style in Segment.simplify(self._record_buffer):
text = escape(text)
if style:
rule = style.get_html_style(_theme)
if rule:
style_number = styles.setdefault(rule, len(styles) + 1)
append(f'<span class="r{style_number}">{text}</span>')
else:
append(text)
else:
append(text)
stylesheet_rules: List[str] = []
stylesheet_append = stylesheet_rules.append
for style_rule, style_number in styles.items():
if style_rule:
stylesheet_append(f".r{style_number} {{{style_rule}}}")
stylesheet = "\n".join(stylesheet_rules)
rendered_code = render_code_format.format(
code="".join(fragments),
stylesheet=stylesheet,
foreground=_theme.foreground_color.hex,
background=_theme.background_color.hex,
)
if clear:
del self._record_buffer[:]
return rendered_code
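    # Sketch of the two HTML modes (added comment, not original code):
    #
    #     console = Console(record=True)
    #     console.print("[bold red]alert[/bold red]")
    #     html_classes = console.export_html(clear=False)        # CSS classes in a <style> block
    #     html_inline = console.export_html(inline_styles=True)  # style="..." on each <span>
    #
    # Inline styles make larger files, but fragments survive cut and paste.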
def save_html(
self,
path: str,
theme: Theme = None,
clear: bool = True,
code_format=CONSOLE_HTML_FORMAT,
inline_styles: bool = False,
) -> None:
"""Generate HTML from console contents and write to a file (requires record=True argument in constructor).
Args:
path (str): Path to write html file.
theme (Theme, optional): Theme object containing console colors.
clear (bool, optional): Set to True to clear the record buffer after generating the HTML.
code_format (str, optional): Format string to render HTML, should contain {foreground}
{background} and {code}.
            inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
Defaults to False.
"""
html = self.export_html(
theme=theme,
clear=clear,
code_format=code_format,
inline_styles=inline_styles,
)
with open(path, "wt") as write_file:
write_file.write(html)
if __name__ == "__main__": # pragma: no cover
console = Console()
with console.style("dim on black"):
console.print("[b]Hello[/b], [i]World[/i]!")
console.print("Hello, *World*!")
console.log(
"JSONRPC *request*",
5,
1.3,
True,
False,
None,
{
"jsonrpc": "2.0",
"method": "subtract",
"params": {"minuend": 42, "subtrahend": 23},
"id": 3,
},
)
console.log("# Hello, **World**!")
console.log("Hello, World!", "{'a': 1}", repr(console))
console.log(
{
"name": None,
"empty": [],
"quiz": {
"sport": {
"answered": True,
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriros",
"Huston Rocket",
],
"answer": "Huston Rocket",
},
},
"maths": {
"answered": False,
"q1": {
"question": "5 + 7 = ?",
"options": [10, 11, 12, 13],
"answer": 12,
},
"q2": {
"question": "12 - 8 = ?",
"options": [1, 2, 3, 4],
"answer": 4,
},
},
},
}
)
console.log("foo")
| [
"[email protected]"
]
| |
9d54ff837c1a8f276a97e819ccf6c7a49e66713b | 24144f83276705fe2f4df295ee50199c2035ca7b | /active/theses-mainz.py | 0acd9b145b345b370518620e935b1280fb1eaed5 | []
| no_license | AcidBurn429/ejlmod | a2e4eb6bb28bcb6bbccc3d83e2e24f5aed23d4eb | dec50edbb14380686072d7311589a2363ef5cd00 | refs/heads/master | 2023-08-14T21:19:10.890194 | 2021-09-28T13:39:06 | 2021-09-28T13:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py |
# -*- coding: utf-8 -*-
#harvest theses from Mainz U.
#FS: 2020-01-27
import getopt
import sys
import os
import urllib2
import urlparse
from bs4 import BeautifulSoup
import re
import ejlmod2
import codecs
import datetime
import time
import json
xmldir = '/afs/desy.de/user/l/library/inspire/ejl'
retfiles_path = "/afs/desy.de/user/l/library/proc/retinspire/retfiles"
now = datetime.datetime.now()
stampoftoday = '%4d-%02d-%02d' % (now.year, now.month, now.day)
publisher = 'Mainz U.'
jnlfilename = 'THESES-MAINZ-%s' % (stampoftoday)
hdr = {'User-Agent' : 'Magic Browser'}
recs = []
rpp = 40
pages = 3
for page in range(pages):
tocurl = 'https://openscience.ub.uni-mainz.de/simple-search?query=&filter_field_1=organisationalUnit&filter_type_1=equals&filter_value_1=FB+08+Physik%2C+Mathematik+u.+Informatik&filter_field_2=publicationType&filter_type_2=equals&filter_value_2=Dissertation&sort_by=dc.date.issued_dt&order=desc&rpp=' + str(rpp) + '&etal=0&start=' + str(page*rpp)
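    # Pagination note (added comment): with rpp=40 and pages=3 the ``start`` query parameter
    # takes the values 0, 40 and 80, i.e. result pages 1-40, 41-80 and 81-120.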
print '==={ %i/%i }==={ %s }===' % (page+1, pages, tocurl)
tocpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(tocurl))
for tr in tocpage.body.find_all('tr'):
rec = {'tc' : 'T', 'keyw' : [], 'jnl' : 'BOOK', 'note' : []}
for td in tr.find_all('td', attrs = {'headers' : 't1'}):
rec['year'] = td.text.strip()
rec['date'] = td.text.strip()
for td in tr.find_all('td', attrs = {'headers' : 't3'}):
for a in td.find_all('a'):
rec['tit'] = a.text.strip()
rec['hdl'] = re.sub('.*handle\/', '', a['href'])
rec['artlink'] = 'https://openscience.ub.uni-mainz.de' + a['href']
recs.append(rec)
time.sleep(10)
i = 0
for rec in recs:
i += 1
print '---{ %i/%i }---{ %s }------' % (i, len(recs), rec['artlink'])
try:
artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
time.sleep(4)
except:
try:
print "retry %s in 180 seconds" % (rec['artlink'])
time.sleep(180)
artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
except:
print "no access to %s" % (rec['artlink'])
continue
for tr in artpage.body.find_all('tr'):
for td in tr.find_all('td', attrs = {'class' : 'metadataFieldLabel'}):
tdt = td.text.strip()
for td in tr.find_all('td', attrs = {'class' : 'metadataFieldValue'}):
#authors
if tdt == 'Authors:':
rec['autaff'] = [[ td.text.strip(), publisher ]]
#language
elif tdt == 'Language :':
if td.text.strip() == 'german':
rec['language'] = 'German'
#abstract
elif tdt == 'Abstract:':
rec['abs'] = td.text.strip()
#license
elif re.search('Information', tdt):
for a in td.find_all('a'):
if re.search('creativecommons.org', a['href']):
rec['license'] = {'url' : a['href']}
#pages
elif tdt == 'Extent:':
if re.search('\d\d', td.text):
rec['pages'] = re.sub('.*?(\d\d+).*', r'\1', td.text.strip())
#DOI
elif tdt == 'DOI:':
for a in td.find_all('a'):
rec['doi'] = re.sub('.*org\/', '', a['href'])
#FFT
for td in tr.find_all('td', attrs = {'class' : 'standard'}):
for a in td.find_all('a'):
if re.search('pdf$', a['href']):
if 'license' in rec.keys():
rec['FFT'] = 'https://openscience.ub.uni-mainz.de' + a['href']
else:
rec['hidden'] = 'https://openscience.ub.uni-mainz.de' + a['href']
print ' ', rec.keys()
#closing of files and printing
xmlf = os.path.join(xmldir, jnlfilename+'.xml')
xmlfile = codecs.EncodedFile(codecs.open(xmlf, mode='wb'), 'utf8')
ejlmod2.writenewXML(recs, xmlfile, publisher, jnlfilename)
xmlfile.close()
#retrieval
retfiles_text = open(retfiles_path, "r").read()
line = jnlfilename+'.xml'+ "\n"
if not line in retfiles_text:
retfiles = open(retfiles_path, "a")
retfiles.write(line)
retfiles.close()
| [
"[email protected]"
]
| |
d49bcc85fb670923856b90cd4b3431c31b19fed9 | 8671856181ef218f147f23f367fd0b1dc7592e1a | /realtor/migrations/0020_auto_20190918_1213.py | 69d2a3d67932c1247662582520c4265d41e2eef5 | []
| no_license | Alishrf/Shop_Website | e4fef9618aec2db6f4a655ff643aa68cf42dbb68 | 971d4a2ff8b7a68a0157681ff26404fe403502e6 | refs/heads/master | 2020-08-11T06:03:47.642870 | 2019-10-14T14:29:30 | 2019-10-14T14:29:30 | 214,504,737 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py |
# Generated by Django 2.2.4 on 2019-09-18 07:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('realtor', '0019_auto_20190918_1203'),
]
operations = [
migrations.AlterField(
model_name='realtor',
name='hire_date',
field=models.DateTimeField(default=datetime.datetime(2019, 9, 18, 12, 13, 29, 200152)),
),
]
| [
"[email protected]"
]
|